Fixed HTML escape and added min message count.
- boards/a.json +2 -2
- boards/b.json +2 -2
- boards/bant.json +2 -2
- boards/biz.json +2 -2
- boards/ck.json +2 -2
- boards/co.json +2 -2
- boards/diy.json +2 -2
- boards/fit.json +2 -2
- boards/g.json +2 -2
- boards/his.json +2 -2
- boards/int.json +2 -2
- boards/jp.json +2 -2
- boards/k.json +2 -2
- boards/lit.json +2 -2
- boards/m.json +2 -2
- boards/mu.json +2 -2
- boards/out.json +2 -2
- boards/r9k.json +2 -2
- boards/sci.json +2 -2
- boards/trash.json +2 -2
- boards/v.json +2 -2
- boards/vg.json +2 -2
- boards/vmg.json +2 -2
- boards/vt.json +2 -2
- condense.py +2 -1
- merge.py +9 -2
- merged_strings_train.jsonl +2 -2
boards/a.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:0a5fc73eb9f1486b67e5898a19b7c179429a65a32ab32859fe2667f3d1bcc187
+size 32697664

boards/b.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:5234debf175abd4357041759b56660adc48251fb862c0ce872b32768da89cc73
+size 13251396

boards/bant.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:99908c0da76a7e800f1eb6b9755cbf0e7ec633f70dba6d77ac1cbbb1f07b50c6
+size 7204467

boards/biz.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:37c97ed0aac7febcb871ef552d2729c5a7f1e1cd841487e12204083f179afccc
+size 17479781

boards/ck.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6245fb53c575feaf581a6f428a7e167c860e6da30a9d938cdc9d6cff870ebf01
+size 9456185

boards/co.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d4fff3f87e831c1ecd36299ac3b0f0d1488dfe6451c35976417b9bf6ffdb12df
+size 26055247

boards/diy.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:c68cd232e94751e56ebab7424c2d35fce55f629c58f095a0c44629492fe34b91
+size 4430720

boards/fit.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:31b0d8ff2e5bec15dbbaa28e48c324efe6f826c1554de7198900ab6e9f7f4875
+size 17067876

boards/g.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:1d0889dd91cd5b37bd51ecc3492d68c462f9afc59cffecab2792aefb0d0fdefb
+size 41156888

boards/his.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:355d21e52eabdb8cadacc84e7388bc03a57cc6064200f696de4b9955ba7996ea
+size 14336670

boards/int.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:1a7887ec1e83bbb2d17c2326f07a68e1182912215ed8a9c38b1f8a6e9a0e3798
+size 46106098

boards/jp.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f666c7857968b9c011bd3285e6a6a6f9914bfed797df6e185e009f4e7a977e99
+size 12652996

boards/k.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b18db8afbdb455c169bbc0a35a55e9a8d864c06befa43dc401dda6bba4d7e5e4
+size 27920419

boards/lit.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e03ff273a2d98889200da4d8fc026f6d45cc1fbe75dbfcb09488d90fbfebda17
+size 15023797

boards/m.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e7f69b28c7a8a7a95d8647f52b198bae8d31172b094bf58cfe33d68a772cd424
+size 10304278

boards/mu.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d2a833d8d62334dcc7213f810a8024f4e41f01d80055e8be8ad5699cbca53957
+size 11121936

boards/out.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:988645ac00f57e9783282c2f6a76c0abeff39e8f538f43733866b90ba5d2f6e7
+size 3763284

boards/r9k.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:15258673bfe5d6704de39577dd44772b13af0dc4dc557ce7be7ac419fa85be6a
+size 34355927

boards/sci.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:2beec036bb03134fa056e26c11b2175d0f079a34fec903b2ed7ae42eae59a3ec
+size 10232156

boards/trash.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:a640afc865c25f4637b0b74ad1836fa6555c77b5fce078af84dcc08d091bb853
+size 26691642

boards/v.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:ea8578431008a35be92ea86876b2a25faea683015919eb17b03a7fa537f0344e
+size 67911016

boards/vg.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b56c3c055aedbe3e932c3e286781e382a9bb2860245230cfb4a50b52e7d695a2
+size 117204636

boards/vmg.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:93f8513f7be64aab614c76262e52e4610b0f1a816ae414c0424784b2f7370fd3
+size 6490402

boards/vt.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d8b3340c68c7c656abad784b10c3650829e477eae292ae03de5b2070f791717f
+size 28686830
condense.py CHANGED
@@ -2,6 +2,7 @@ import os
 import re
 import sys
 import json
+import html

 MESSAGE_SPLIT_PATTERN = re.compile(r"\n--- (?=\d+ *\n)")
 REPLY_ID_PATTERN = re.compile(r">>(\d+)")
@@ -44,7 +45,7 @@ def main():
                 nonlocal replace_count
                 replace_count += 1
                 return f">>{orig_to_norm.get(int(match.group(1)), "unknown")}"
-            line = REPLY_ID_PATTERN.sub(replace_id, line)
+            line = html.unescape(REPLY_ID_PATTERN.sub(replace_id, line))
             if not message_has_content and replace_count == 0 and line.strip():
                 message_has_content = True
             if j != 1:
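For context, a minimal standalone sketch of what the added html.unescape call changes for a single line (the sample line and the orig_to_norm mapping below are hypothetical; the real script builds the mapping while condensing a thread): the reply-ID substitution runs first, then any remaining HTML entities such as &quot; or &amp; are decoded.

import html
import re

REPLY_ID_PATTERN = re.compile(r">>(\d+)")

# Hypothetical condensed line: the reply quote is already plain ">>", but
# other HTML entities from the source data are still escaped.
line = ">>570368 that&#039;s a &quot;feature&quot; &amp; not a bug"
orig_to_norm = {570368: 2}  # hypothetical original-ID -> normalized-ID mapping

def replace_id(match):
    return f">>{orig_to_norm.get(int(match.group(1)), 'unknown')}"

before = REPLY_ID_PATTERN.sub(replace_id, line)                 # old behaviour
after = html.unescape(REPLY_ID_PATTERN.sub(replace_id, line))   # new behaviour

print(before)  # >>2 that&#039;s a &quot;feature&quot; &amp; not a bug
print(after)   # >>2 that's a "feature" & not a bug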
merge.py CHANGED
@@ -9,6 +9,7 @@ def parse_args():
     parser = argparse.ArgumentParser(description="Merge the boards into a single train set.")
     parser.add_argument("-t", "--tokenizer-name-or-path", default="meta-llama/Llama-3.1-8B", help="The name or path for the tokenizer")
     parser.add_argument("-l", "--limit", type=int, default=4096, help="Length limit in tokens for each post")
+    parser.add_argument("-m", "--min", type=int, default=5, help="Minimum amount of message in each post")
     parser.add_argument("-i", "--id", default="<|start_header_id|>", help="Prefix token for message IDs")
     parser.add_argument("-c", "--content", default="<|end_header_id|>", help="Prefix token for message contents")
     return parser.parse_args()
@@ -21,6 +22,7 @@ def main():
     if args.content not in tokenizer.vocab:
         print(f"The message content prefix token \"{args.content}\" is not a token in \"{args.tokenizer_name_or_path}\", it will work but it's better to be a token in the tokenizer.")
     boards_dir = "boards"
+    total_token_count = 0
     with open("merged_strings_train.jsonl", "w", encoding="utf8") as output:
         for board_path in tqdm.tqdm(os.listdir(boards_dir), desc="Boards"):
             board_name, ext = os.path.splitext(board_path)
@@ -32,17 +34,22 @@ def main():
             with open(board_path, "r", encoding="utf8") as f:
                 board = json.load(f)
             for post in tqdm.tqdm(board, desc="Posts"):
+                if len(post) < args.min:
+                    continue
                 post_content = board_name
                 post_token_count = len(tokenizer.encode(post_content, add_special_tokens=False)) + 2 # Add 2 for the start of string and end of string tokens.
                 for message in post:
                     formatted = f"{args.id}{message["id"]}{args.content}{message["content"]}"
                     formatted_token_count = len(tokenizer.encode(formatted, add_special_tokens=False))
-                    post_token_count
-                    if
+                    added_token_count = post_token_count + formatted_token_count
+                    if added_token_count > args.limit:
                         break
                     post_content += formatted
+                    post_token_count = added_token_count
                 json.dump({"input": "", "output": post_content}, output, ensure_ascii=False)
                 output.write("\n")
+                total_token_count += post_token_count
+    print("Merge finished, total token count:", total_token_count)

 if __name__ == "__main__":
     try:
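To make the new control flow easier to follow, here is a toy, self-contained sketch of the same filtering and token-budget accumulation (the whitespace "tokenizer", the posts, and the limits are made up; the real script counts tokens with the Hugging Face tokenizer and uses the --id/--content prefix tokens): posts with fewer than --min messages are skipped, a message is appended only while the running count stays within --limit, and each per-post count feeds the new total_token_count report.

# Toy illustration of the filtering/accumulation logic added to merge.py.
def encode(text):
    return text.split()  # stand-in for tokenizer.encode(..., add_special_tokens=False)

LIMIT = 12        # --limit: token budget per post
MIN_MESSAGES = 2  # --min: posts with fewer messages are skipped

posts = [
    [{"id": 1, "content": "lone message"}],  # dropped: fewer than MIN_MESSAGES
    [{"id": 1, "content": "first message here"},
     {"id": 2, "content": "second message here"},
     {"id": 3, "content": "a very long third message that will not fit"}],
]

total_token_count = 0
for post in posts:
    if len(post) < MIN_MESSAGES:
        continue
    post_content = "g"  # board name prefix
    post_token_count = len(encode(post_content)) + 2  # +2 for BOS/EOS
    for message in post:
        formatted = f"<{message['id']}>{message['content']}"
        added_token_count = post_token_count + len(encode(formatted))
        if added_token_count > LIMIT:
            break  # budget exceeded: stop before adding this message
        post_content += formatted
        post_token_count = added_token_count
    total_token_count += post_token_count
    print(post_token_count, repr(post_content))

print("total:", total_token_count)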
merged_strings_train.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d238c40138145de7d25cce4b9bb9ec6dec74b6d0b35bcc2c9badffdda3650948
+size 354786900
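For reference, a sketch of the shape of one record in the regenerated merged_strings_train.jsonl, assuming the default --id and --content prefix tokens (the message IDs and contents here are invented): each line is a JSON object whose output field is the board name followed by the concatenated, prefixed messages of one post.

import json

# Hypothetical record; real lines are written by merge.py via json.dump(..., ensure_ascii=False).
record = {
    "input": "",
    "output": "g<|start_header_id|>1<|end_header_id|>first message<|start_header_id|>2<|end_header_id|>>>1 a reply",
}
print(json.dumps(record, ensure_ascii=False))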