Commit 0faa90f
antoinelb7 committed
Parent: b81147e

Upload 12 files

- .gitattributes +5 -0
- data/alloprof.csv +3 -0
- data/pages/page-content-en.json +3 -0
- data/pages/page-content-fr.json +3 -0
- data/questions/categories.json +1 -0
- data/questions/comments.json +3 -0
- data/questions/discussions.json +3 -0
- data/questions/grades.json +1 -0
- data/questions/roles.json +0 -0
- data/related_subjects.json +24 -0
- scripts/download_images.py +106 -0
- scripts/parse_data.py +566 -0
- scripts/requirements.txt +3 -0
.gitattributes CHANGED
@@ -52,3 +52,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+data/alloprof.csv filter=lfs diff=lfs merge=lfs -text
+data/pages/page-content-en.json filter=lfs diff=lfs merge=lfs -text
+data/pages/page-content-fr.json filter=lfs diff=lfs merge=lfs -text
+data/questions/comments.json filter=lfs diff=lfs merge=lfs -text
+data/questions/discussions.json filter=lfs diff=lfs merge=lfs -text
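The five new patterns route the added data files through Git LFS, so the repository stores small pointer stubs in place of the raw bytes; those stubs are what the ADDED diffs below show for the large files.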
data/alloprof.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:797a2cff4e85d0672918f17851358b68d3d2b15c54bf20f08260839b52c5d860
+size 391561077
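A minimal sketch (not part of the commit) of how a consumer can detect that it got this pointer stub rather than the real CSV, e.g. after cloning without `git lfs pull`; the relative path assumes the repository root as working directory:

from pathlib import Path

def is_lfs_pointer(path: Path) -> bool:
    # LFS pointer stubs begin with the spec line shown in the diff above
    with open(path, "rb") as f:
        return f.readline().startswith(b"version https://git-lfs.github.com/spec/")

if is_lfs_pointer(Path("data/alloprof.csv")):
    raise RuntimeError("Got an LFS pointer; run `git lfs pull` to fetch the data.")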
data/pages/page-content-en.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e774b470f5ef18a48738bbecd75988cd409cb3b806524917a0fbae6fbc8313c
+size 52195417
data/pages/page-content-fr.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:552ab80f3e31605fe233b418439cc7e1876c99e6fd1a3a7f8c4394115384b9b6
+size 60883951
data/questions/categories.json ADDED
@@ -0,0 +1 @@
+[{"CategoryID": -1, "Name": "Root"}, {"CategoryID": 2, "Name": "Anglais"}, {"CategoryID": 3, "Name": "Chimie"}, {"CategoryID": 4, "Name": "\u00c9ducation financi\u00e8re"}, {"CategoryID": 5, "Name": "Fran\u00e7ais"}, {"CategoryID": 6, "Name": "G\u00e9ographie"}, {"CategoryID": 7, "Name": "Histoire"}, {"CategoryID": 8, "Name": "Math\u00e9matiques"}, {"CategoryID": 9, "Name": "Monde contemporain"}, {"CategoryID": 10, "Name": "Physique"}, {"CategoryID": 11, "Name": "Sciences"}, {"CategoryID": 12, "Name": "Autre"}, {"CategoryID": 13, "Name": "Mathematics"}, {"CategoryID": 14, "Name": "English"}, {"CategoryID": 15, "Name": "History"}, {"CategoryID": 16, "Name": "Science"}, {"CategoryID": 17, "Name": "Physics"}, {"CategoryID": 18, "Name": "Chemistry"}, {"CategoryID": 19, "Name": "Geography"}, {"CategoryID": 20, "Name": "Contemporary World"}, {"CategoryID": 21, "Name": "Financial Education"}, {"CategoryID": 22, "Name": "French"}, {"CategoryID": 23, "Name": "Other"}]
data/questions/comments.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:380afab64d5a6152122faa2d77002d33bb853c46cfebe0a5e45362007e5de8f4
+size 56383516
data/questions/discussions.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1ca84b813c14a0f3f8d9e541cb2ca5cff43ca57a0ecaa3e30c1557e74fac46d
+size 16264380
data/questions/grades.json ADDED
@@ -0,0 +1 @@
+[{"GradeID": 0, "Name": "Utilisateur"}, {"GradeID": 1, "Name": "Primaire 1"}, {"GradeID": 2, "Name": "Primaire 2"}, {"GradeID": 3, "Name": "Primaire 3"}, {"GradeID": 4, "Name": "Primaire 4"}, {"GradeID": 5, "Name": "Primaire 5"}, {"GradeID": 6, "Name": "Primaire 6"}, {"GradeID": 7, "Name": "Secondaire 1"}, {"GradeID": 8, "Name": "Secondaire 2"}, {"GradeID": 9, "Name": "Secondaire 3"}, {"GradeID": 10, "Name": "Secondaire 4"}, {"GradeID": 11, "Name": "Secondaire 5"}, {"GradeID": 12, "Name": "Post-secondaire"}, {"GradeID": 13, "Name": "Parent"}, {"GradeID": 14, "Name": "Enseignant"}]
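Both grades.json and categories.json are flat ID-to-name lookup tables. As an illustrative sketch of their use (scripts/parse_data.py below joins them as polars DataFrames instead):

import json

with open("data/questions/grades.json") as f:
    grade_names = {row["GradeID"]: row["Name"] for row in json.load(f)}

print(grade_names[7])  # Secondaire 1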
data/questions/roles.json ADDED
The diff for this file is too large to render. See raw diff.
data/related_subjects.json ADDED
@@ -0,0 +1,24 @@
+{
+    "chemistry": ["other", "science", "physics"],
+    "contemporary_world": ["other", "history"],
+    "english": ["other"],
+    "financial_ed": ["other"],
+    "french": ["other"],
+    "geography": ["other"],
+    "history": ["other", "contemporary_world"],
+    "math": ["other", "physics"],
+    "other": [
+        "chemistry",
+        "contemporary_world",
+        "english",
+        "financial_ed",
+        "french",
+        "geography",
+        "history",
+        "math",
+        "physics",
+        "science"
+    ],
+    "physics": ["other", "science", "math", "chemistry"],
+    "science": ["other", "chemistry", "physics"]
+}
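For each subject, this file lists the other subjects whose documents should still count as possible matches; add_related_subjects in scripts/parse_data.py appends them to a page's own subject list. A short sketch of the same expansion:

import json

with open("data/related_subjects.json") as f:
    related_subjects = json.load(f)

# a physics page also counts as possible for these subjects
subjects = ["physics"] + related_subjects["physics"]
print(subjects)  # ['physics', 'other', 'science', 'math', 'chemistry']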
scripts/download_images.py ADDED
@@ -0,0 +1,106 @@
+#!/usr/bin/python
+import json
+import shutil
+from pathlib import Path
+from typing import Any, Iterable
+
+import httpx
+import polars as pl
+import tqdm
+
+########
+# main #
+########
+
+
+def main() -> None:
+    path = Path(__file__).parent / ".." / "data"
+    load_print("Reading data...")
+    data = read_data(path)
+    urls = extract_image_urls(data)
+    done_print("Read data.")
+    load_print("Downloading images...")
+    urls = download_images(urls, path)
+    with open(path / "images" / "urls.json", "w") as f:
+        json.dump(urls, f, indent=2)
+    done_print("Downloaded images.")
+
+
+def read_data(path: Path) -> pl.DataFrame:
+    return pl.read_csv(path / "alloprof.csv").with_columns(
+        [
+            pl.col("subject").str.split(";"),
+            pl.col("grade").str.split(";"),
+            pl.col("images").str.split(";"),
+            pl.col("relevant").str.split(";"),
+            pl.col("possible").str.split(";"),
+        ]
+    )
+
+
+def extract_image_urls(data: pl.DataFrame) -> dict[str, int]:
+    return {
+        url: i
+        for i, url in enumerate(
+            set().union(*(set(row) for row in data["images"]))
+        )
+    }
+
+
+def download_images(urls: dict[str, int], path: Path) -> dict[str, int]:
+    path = path / "images"
+    path.mkdir(exist_ok=True)
+    missing: list[str] = []
+    for url, id_ in load_progress(urls.items(), "Downloading images..."):
+        extension = url.split(".")[-1]
+        if extension in ("jpg", "jpeg", "png"):
+            with httpx.stream("GET", url) as resp:
+                if resp.status_code == 200:
+                    with open(path / f"{id_}.{extension}", "wb") as f:
+                        for chunk in resp.iter_bytes():
+                            if chunk:
+                                f.write(chunk)
+                else:
+                    missing = [*missing, url]
+        else:
+            missing = [*missing, url]
+    return {url: id_ for url, id_ in urls.items() if url not in missing}
+
+
+#########
+# utils #
+#########
+
+
+def load_print(text: str, symbol: str = "*") -> None:
+    symbol = f"\033[1m[{symbol}]\033[0m"
+    print(
+        f"\r{symbol} {text}".ljust(shutil.get_terminal_size().columns),
+        end="\r",
+    )
+
+
+def done_print(text: str, symbol: str = "+") -> None:
+    symbol = f"\033[1m\033[92m[{symbol}]\033[0m"
+    print(f"\r{symbol} {text}".ljust(shutil.get_terminal_size().columns))
+
+
+def load_progress(
+    iter_: Iterable[Any],
+    text: str,
+    symbol: str = "*",
+    *args: Any,
+    **kwargs: Any,
+) -> Iterable[Any]:
+    symbol = f"\033[1m[{symbol}]\033[0m"
+    return tqdm.tqdm(
+        iter_,
+        f"\r{symbol} {text}",
+        *args,
+        leave=False,
+        **kwargs,
+    )
+
+
+if __name__ == "__main__":
+    main()
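Assuming the LFS-tracked data/alloprof.csv has been fetched, the script runs with no arguments (python scripts/download_images.py): it collects the unique image URLs from the images column, saves each reachable jpg/jpeg/png under data/images/ named by its numeric id, and writes the URL-to-id map of the successful downloads to data/images/urls.json.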
scripts/parse_data.py ADDED
@@ -0,0 +1,566 @@
+#!/usr/bin/python
+import argparse
+import functools
+import json
+import re
+import shutil
+from pathlib import Path
+from typing import Any, Iterator, cast
+
+import polars as pl
+
+########
+# main #
+########
+
+
+def main() -> None:
+    args = read_args()
+
+    data = read_data(
+        args.related_subjects, args.grades_lower, args.grades_higher
+    )
+    done_print("Read data.")
+    load_print("Adding possible documents...")
+    data = add_possible_documents(data)
+    done_print("Added possible documents.")
+
+    load_print("Writing data...")
+    path = write_data(data)
+    done_print(
+        f"Wrote data to {path.resolve().relative_to(Path('.').resolve())}."
+    )
+    done_print("Data creation complete.")
+
+
+def read_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter
+    )
+    parser.add_argument(
+        "--related-subjects",
+        default="",
+        help="Path of the related subjects json file"
+        + " (see data/related_subjects.json)."
+        + " Leave empty to not use any related subjects.",
+    )
+    parser.add_argument(
+        "--grades-lower",
+        type=int,
+        default=0,
+        help="Number of grades lower than the minimum document grade for"
+        + " which to consider it possible.",
+    )
+    parser.add_argument(
+        "--grades-higher",
+        type=int,
+        default=0,
+        help="Number of grades higher than the maximum document grade for"
+        + " which to consider it possible.",
+    )
+    return parser.parse_args()
+
+
+def read_data(
+    related_subjects_file: str, grades_lower: int, grades_higher: int
+) -> pl.DataFrame:
+    path = Path(__file__).parent / ".." / "data"
+    load_print("Reading questions...")
+    questions = read_questions(path)
+    done_print("Read questions.")
+    load_print("Reading pages...")
+    pages = read_pages(
+        path, related_subjects_file, grades_lower, grades_higher
+    )
+    done_print("Read pages.")
+    load_print("Combining pages and questions...")
+    return combine_documents(questions, pages)
+
+
+def combine_documents(
+    questions: pl.DataFrame, pages: pl.DataFrame
+) -> pl.DataFrame:
+    # create hash of urls to quickly associate links to ids
+    page_hashes = {
+        hash(url): id_ for url, id_ in zip(pages["url"], pages["id"])
+    }
+
+    questions = (
+        questions.with_columns(
+            [
+                # convert all page links to the page ids, making sure each
+                # page can only appear once in the list
+                pl.col("page_links").apply(
+                    lambda list_: list(
+                        {
+                            id_
+                            for url in list_
+                            if (
+                                id_ := page_hashes.get(
+                                    hash(
+                                        url.replace(
+                                            "https://www.alloprof.qc.ca", ""
+                                        )
+                                    )
+                                )
+                            )
+                            is not None
+                        }
+                    )
+                ),
+                # convert question links to its id
+                pl.col("question_links").apply(
+                    lambda list_: list(
+                        {
+                            id_
+                            for x in list_
+                            if (
+                                id_ := x.replace(
+                                    "https://www.alloprof.qc.ca/zonedentraide/discussion/",  # noqa
+                                    "",
+                                ).split("/")[0]
+                            )
+                            != "https:"
+                        }
+                    )
+                ),
+            ]
+        )
+        # combine page and question links in a single list of ids
+        .with_columns(
+            pl.col("page_links").arr.concat("question_links").alias("links")
+        )
+        .drop(["page_links", "question_links"])
+        .with_columns(
+            [
+                # to identify which document is a question or a page
+                pl.lit(True).alias("is_query"),
+                # to be able to find the actual question on the website
+                pl.col("id")
+                .apply(
+                    lambda x: f"https://www.alloprof.qc.ca/zonedentraide/discussion/{x}"  # noqa
+                )
+                .alias("url"),
+            ]
+        )
+    )
+
+    pages = (
+        pages
+        # add columns needed to concatenate to questions
+        .with_columns(
+            [
+                pl.col("id")
+                .apply(lambda _: [])
+                .cast(pl.List(pl.Utf8))
+                .alias("links"),
+                pl.lit(False).alias("is_query"),
+                pl.col("id")
+                .apply(lambda _: [])
+                .cast(pl.List(pl.Utf8))
+                .alias("images"),
+                pl.col("url").apply(
+                    lambda x: f"https://www.alloprof.qc.ca{x}"
+                ),
+            ]
+        )
+        # have pages repeated for each grade and subject it's relevant for
+        .explode("grade").explode("subject")
+    )
+
+    return (
+        pl.concat([questions, pages], how="diagonal")
+        .rename({"links": "relevant"})
+        .with_columns(pl.col("relevant").apply(sorted))
+    )
+
+
+def add_possible_documents(data: pl.DataFrame) -> pl.DataFrame:
+    # extract list of possible documents for each combination of subject,
+    # grade and language
+    data = data.with_columns(
+        (
+            pl.col("subject")
+            + ","
+            + pl.col("grade")
+            + ","
+            + pl.col("language")
+        ).alias("categories")
+    )
+    possible = (
+        data.select(["id", "categories"])
+        .unique()
+        .groupby("categories")
+        .agg(pl.list("id"))
+        .rename({"id": "possible"})
+    )
+    # add possible documents only to questions
+    data = pl.concat(
+        [
+            data.filter(pl.col("is_query"))
+            .join(possible, on="categories")
+            .drop("categories"),
+            data.filter(~pl.col("is_query")).with_columns(
+                pl.col("id")
+                .apply(lambda _: [])
+                .cast(pl.List(pl.Utf8))
+                .alias("possible")
+            ),
+        ],
+        how="diagonal",
+    )
+    # concatenate all subjects and grades for each document so there is only
+    # a single unique document per line
+    return (
+        # combine all grades for each id and subject
+        data.groupby(["id", "subject"])
+        .agg([pl.exclude("grade").first(), pl.list("grade")])
+        # combine all subjects for each id
+        .groupby("id")
+        .agg([pl.exclude("subject").first(), pl.list("subject")])
+        # remove duplicate grades
+        .with_columns(pl.col("grade").arr.unique())
+    )
+
+
+def write_data(data: pl.DataFrame) -> Path:
+    path = Path(__file__).parent / ".." / "data" / "alloprof.csv"
+    data = data.with_columns(
+        [
+            pl.col("subject").arr.join(";"),
+            pl.col("grade").arr.join(";"),
+            pl.col("images").arr.join(";"),
+            pl.col("relevant").arr.join(";"),
+            pl.col("possible").arr.join(";"),
+        ]
+    )
+    data.write_csv(path)
+    return path
+
+
+#############
+# questions #
+#############
+
+
+def read_questions(path: Path) -> pl.DataFrame:
+    path = path / "questions"
+    questions = read_questions_(path)
+    answers = read_answers(path)
+    grades = read_grades(path)
+    subjects = read_subjects(path)
+
+    return (
+        questions
+        # convert subject and grade ids to their name
+        .join(subjects, on="CategoryID")
+        .drop("CategoryID")
+        .join(grades, on="GradeID")
+        .drop("GradeID")
+        # add answers and extract links and images
+        .join(answers, on="id", how="left")
+        .pipe(extract_relevant_links)
+        .with_columns(
+            [
+                pl.col("id").cast(pl.Utf8),  # to make it consistent with pages
+                pl.col("text").apply(extract_text_from_json),
+                pl.col("text").apply(extract_images_from_json).alias("images"),
+            ]
+        )
+    )
+
+
+def read_questions_(path: Path) -> pl.DataFrame:
+    return pl.read_json(path / "discussions.json").select(
+        [
+            pl.col("DiscussionID").alias("id"),
+            pl.col("Body").alias("text"),
+            pl.col("Language").alias("language"),
+            pl.col("InsertUserID").alias("user"),
+            pl.col("CategoryID"),
+            pl.col("GradeID"),
+        ]
+    )
+
+
+def read_answers(path: Path) -> pl.DataFrame:
+    return (
+        pl.read_json(path / "comments.json")
+        .filter(~pl.col("DateAccepted").is_null())
+        .select(
+            [
+                pl.col("DiscussionID").alias("id"),
+                pl.col("Body").alias("answer"),
+            ]
+        )
+    )
+
+
+def read_grades(path: Path) -> pl.DataFrame:
+    return pl.read_json(path / "grades.json").select(
+        [pl.col("GradeID"), pl.col("Name").alias("grade")]
+    )
+
+
+def read_subjects(path: Path) -> pl.DataFrame:
+
+    return pl.read_json(path / "categories.json").select(
+        [
+            pl.col("CategoryID"),
+            # convert french subjects to english to make them consistent with
+            # pages
+            pl.col("Name").apply(convert_subject).alias("subject"),
+        ]
+    )
+
+
+def extract_relevant_links(data: pl.DataFrame) -> pl.DataFrame:
+    def extract_links(text: str) -> list[str]:
+        return list(
+            set(
+                re.findall(
+                    r"(https?:(?:\\)?/(?:\\)?/[a-zA-Z0-9/\\\.-]+)",
+                    text.replace("\\/", "/"),
+                )
+            )
+        )
+
+    def extract_page_links(links: list[str]) -> list[str]:
+        return [link for link in links if "/eleves/bv/" in link]
+
+    def extract_question_links(links: list[str]) -> list[str]:
+        return [link for link in links if "/zonedentraide/discussion" in link]
+
+    return (
+        data.with_columns(pl.col("answer").fill_null(""))
+        .with_columns(pl.col("answer").apply(extract_links).alias("links"))
+        .with_columns(
+            [
+                pl.col("links").apply(extract_page_links).alias("page_links"),
+                pl.col("links")
+                .apply(extract_question_links)
+                .alias("question_links"),
+            ]
+        )
+    )
+
+
+def extract_text_from_json(json_: str) -> str:
+
+    try:
+        return " ".join(list(extract_text(json.loads(json_))))
+    except json.JSONDecodeError:
+        return ""
+
+
+def extract_text(raw_section: list[dict] | dict) -> Iterator[str]:
+    if isinstance(raw_section, list):
+        for section_content in raw_section:
+            yield from extract_text(section_content)
+
+    elif isinstance(raw_section, dict):
+        for section_tag, section_content in raw_section.items():
+            if section_tag == "insert" and isinstance(section_content, str):
+                yield re.sub(r"\s+", " ", section_content.strip())
+            elif section_tag == "url":
+                yield section_content.strip()  # type: ignore
+            else:
+                yield from extract_text(section_content)
+
+
+def extract_images_from_json(json_: str) -> list[str]:
+
+    try:
+        return list(extract_images(json.loads(json_)))
+    except json.JSONDecodeError:
+        return []
+
+
+def extract_images(raw_section: list[dict] | dict) -> Iterator[str]:
+    if isinstance(raw_section, list):
+        for section_content in raw_section:
+            yield from extract_images(section_content)
+
+    elif isinstance(raw_section, dict):
+        for section_tag, section_content in raw_section.items():
+            if section_tag == "url":
+                yield cast(str, section_content)
+            else:
+                yield from extract_images(section_content)
+
+
+#########
+# pages #
+#########
+
+
+def read_pages(
+    path: Path,
+    related_subjects_file: str,
+    grades_lower: int,
+    grades_higher: int,
+) -> pl.DataFrame:
+    grades = read_grades(path / "questions")
+    fr_pages = pl.read_json(path / "pages" / "page-content-fr.json")["data"]
+    en_pages = pl.read_json(path / "pages" / "page-content-en.json")["data"]
+    return (
+        pl.DataFrame(
+            [parse_page_data(page) for page in [*fr_pages, *en_pages]]
+        )
+        .with_columns(
+            pl.col("subject")
+            .apply(convert_subject)
+            .apply(lambda subject: [subject])
+        )
+        .filter(pl.col("url") != "")
+        .pipe(
+            functools.partial(
+                convert_grades,
+                grades=grades,
+                grades_lower=grades_lower,
+                grades_higher=grades_higher,
+            )
+        )
+        .pipe(
+            functools.partial(
+                add_related_subjects,
+                related_subjects_file=related_subjects_file,
+            )
+        )
+        .pipe(extract_page_text)
+    )
+
+
+def parse_page_data(data: dict[str, Any]) -> dict[str, str | int | list[str]]:
+    try:
+        page = {
+            "id": data["file"]["uuid"],
+            "url": data["file"]["breadcrumbs"]["current"]["routerLink"],
+            "language": data["file"]["lang"],
+            "subject": data["file"]["topic"],
+            "grade": data["file"]["levels"],
+            "title": data["file"]["title"],
+            "tags": data["file"]["tags"],
+            "content": " ".join(
+                d["attributes"]["content"]
+                for d in data["file"]["metatags"]
+                if d["attributes"]["content"]
+            ),
+        }
+        return {**page, "id": f"{page['id']}-{page['language']}"}
+    except TypeError:
+        return {}
+
+
+def convert_grades(
+    data: pl.DataFrame,
+    grades: pl.DataFrame,
+    grades_lower: int,
+    grades_higher: int,
+) -> pl.DataFrame:
+    return (
+        # add grades lower and higher
+        data.with_columns(
+            pl.col("grade").apply(
+                lambda grades_: (
+                    list(
+                        range(
+                            max(min(grades_) - grades_lower, 1),
+                            min(grades_),
+                        )
+                    )
+                    + list(grades_)
+                    + list(
+                        range(
+                            max(grades_) + 1,
+                            min(max(grades_) + grades_higher, 12) + 1,
+                        )
+                    )
+                )
+                if grades_ is not None
+                else []
+            )
+        )
+        # convert grades to their name
+        .with_columns(
+            pl.col("grade").apply(
+                lambda grades_: pl.DataFrame({"GradeID": grades_})
+                .join(grades, on="GradeID", how="left")["grade"]
+                .to_list()
+            )
+        )
+    )
+
+
+def add_related_subjects(
+    data: pl.DataFrame, related_subjects_file: str
+) -> pl.DataFrame:
+    if related_subjects_file == "":
+        return data
+    else:
+        with open(related_subjects_file) as f:
+            related_subjects = json.load(f)
+        return data.with_columns(
+            pl.col("subject").apply(
+                lambda subject: list(subject) + related_subjects[subject[0]]
+            )
+        )
+
+
+def extract_page_text(data: pl.DataFrame) -> pl.DataFrame:
+    return data.with_columns(
+        (
+            pl.col("title")
+            + " "
+            + pl.col("tags").arr.join(" ")
+            + " "
+            + pl.col("content")
+        ).alias("text")
+    ).drop(["title", "tags", "content"])
+
+
+#########
+# utils #
+#########
+
+
+def load_print(text: str, symbol: str = "*") -> None:
+    symbol = f"\033[1m[{symbol}]\033[0m"
+    print(
+        f"\r{symbol} {text}".ljust(shutil.get_terminal_size().columns),
+        end="\r",
+    )
+
+
+def done_print(text: str, symbol: str = "+") -> None:
+    symbol = f"\033[1m\033[92m[{symbol}]\033[0m"
+    print(f"\r{symbol} {text}".ljust(shutil.get_terminal_size().columns))
+
+
+def convert_subject(subject: str) -> str:
+    subject_conversions = {
+        "chemistry": ["chimie"],
+        "contemporary_world": ["monde contemporain", "contemporary world"],
+        "english": ["anglais"],
+        "financial_ed": ["éducation financière", "financial education"],
+        "french": ["français"],
+        "geography": ["géographie"],
+        "history": ["histoire"],
+        "math": ["mathématiques", "mathematics"],
+        "other": ["autre"],
+        "physics": ["physique"],
+        "science": ["sciences"],
+    }
+    match = [
+        key
+        for key, val in subject_conversions.items()
+        if subject.lower() in [key, *val]
+    ]
+    if match:
+        return match[0]
+    else:
+        return "other"
+
+
+if __name__ == "__main__":
+    main()
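The parser also runs directly; every flag is optional, defaulting to no related subjects and no grade widening, e.g. python scripts/parse_data.py --related-subjects data/related_subjects.json --grades-lower 1 --grades-higher 1. It combines the questions under data/questions/ with the pages under data/pages/ into a single table and writes it to data/alloprof.csv, joining the list columns (subject, grade, images, relevant, possible) with ";".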
scripts/requirements.txt ADDED
@@ -0,0 +1,3 @@
+httpx
+polars
+tqdm
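These cover both scripts: httpx and tqdm are used by download_images.py and polars by both; pip install -r scripts/requirements.txt installs them.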