kanhatakeyama committed on
Commit
d9b5306
1 Parent(s): 6aafb67

Upload 2 files

Files changed (2)
  1. Touten.py +79 -0
  2. load_jap.py +136 -0
Touten.py ADDED
@@ -0,0 +1,79 @@
+ import MeCab
+ 
+ # Initialize the morphological analyzer
+ mecab = MeCab.Tagger()
+ 
+ 
+ def check_end_type(line):
+     """Return the conjugation form and part of speech of the last token in a line."""
+     parsed = mecab.parse(line)
+     # print(parsed)
+     parts = parsed.split("\n")
+     # parse() output ends with "EOS" and an empty string, so parts[-3] is the last token
+     final_part = parts[-3]
+     temp = final_part.split("\t")
+     end_type = temp[-2]
+     hinshi = temp[4]
+     return end_type, hinshi
+ 
+ 
+ def touten_insert(text):
+     """Rejoin broken lines and insert sentence-ending punctuation ("。")."""
+     new_lines = []
+     old_line = ""
+     current_line = ""
+     non_kaiyo = False
+ 
+     for line in text.split("\n"):
+         if line == "":
+             continue
+         # Skip consecutive duplicate lines
+         if line == old_line:
+             continue
+         old_line = line
+ 
+         if non_kaiyo:
+             current_line += line
+             # print("non_kaiyo added", line)
+             non_kaiyo = False
+             continue
+ 
+         end_type, hinshi = check_end_type(line)
+         if end_type.find("終止形") >= 0 or end_type.find("命令") >= 0:
+             # Terminal or imperative form: close the sentence
+             current_line += line + "。"
+         elif hinshi.find("名詞") >= 0 or hinshi.find("記号") >= 0:
+             # Noun or symbol at the end: flush the buffer and keep the line as-is
+             new_lines.append(current_line)
+             current_line = ""
+             new_lines.append(line)
+         else:
+             current_line += line
+ 
+     new_lines.append(current_line)
+     new_lines = [i for i in new_lines if i != ""]
+ 
+     return "\n".join(new_lines)
+ 
+ 
+ def end_filter(line):
+     """Return True if the line ends with a comma or an opening bracket."""
+     end_symbols = [
+         "、", "(", "("
+     ]
+     for symb in end_symbols:
+         if line.endswith(symb):
+             return True
+     return False
+ 
+ 
+ def del_kaigyo(text):
+     """Merge a line into the next one when it ends mid-sentence (comma or opening bracket)."""
+     new_lines = []
+     temp_line = ""
+     for line in text.split("\n"):
+         # print(line)
+         if end_filter(line) or end_filter(temp_line):
+             # print("integ:", temp_line, "----", line)
+             temp_line += line
+         else:
+             if temp_line != "":
+                 new_lines.append(temp_line)
+             new_lines.append(line)
+             temp_line = ""
+ 
+     # if line.startswith("を") or line.startswith("は"):
+     #     current_line += line
+     return "\n".join(new_lines)
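
A minimal usage sketch for these helpers, assuming MeCab is installed with a dictionary whose default output matches the field layout that check_end_type indexes (tab-separated, part of speech at index 4, conjugation form at index -2); the sample text below is a hypothetical illustration:

from Touten import check_end_type, touten_insert, del_kaigyo

# A scraped paragraph with spurious line breaks (hypothetical sample)
raw = "今日は天気が良い\nので散歩に、\n行きました\n公園"

# Conjugation form and part of speech of the last token on one line
print(check_end_type("散歩に行きました"))

# Close sentences with "。", then merge lines that end mid-sentence
print(del_kaigyo(touten_insert(raw)))

touten_insert closes lines that end in a terminal or imperative form with "。", and del_kaigyo then re-joins lines that end with "、" or an opening bracket into the following line.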
load_jap.py ADDED
@@ -0,0 +1,136 @@
+ # %%
+ # Code that reads jap2010 and converts it to parquet. No need to run this.
+ 
+ """
+ Code for processing the text archive of the Japanese Web Corpus 2010
+ http://www.s-yata.jp/corpus/nwc2010/texts/
+ 
+ # Note: the URL is not always available.
+ wget http://www.s-yata.jp/corpus/nwc2010/texts/filelist
+ wget -i filelist
+ """
+ 
+ # %%
+ from datasets import load_dataset
+ import glob
+ import pandas as pd
+ import lzma
+ from Touten import touten_insert, del_kaigyo
+ 
+ # %%
+ import os
+ 
+ out_dir = "../data/jap2010_parquet"
+ if not os.path.exists(out_dir):
+     os.mkdir(out_dir)
+ 
+ # %%
+ 
+ 
+ # %%
+ count_threshold = 3  # number of consecutive blank lines that separates documents
+ max_lines = 2000     # maximum number of lines per document
+ 
+ 
+ task_name = "00000001"
+ for xz_path in glob.glob("../data/jap2010/*.xz"):
+     task_name = xz_path.split("/")[-1].split(".")[0]
+     print(task_name)
+     path = f"../data/jap2010/{task_name}.xz"
+     # Open the .xz file and read its contents
+     with lzma.open(path, 'rt') as file:
+         lines = file.readlines()
+ 
+     new_line_count = 0
+ 
+     current_doc = []
+     cnt = 0
+     text_len = 0
+     text_list = []
+     for line in lines:
+         if line == "\n":
+             new_line_count += 1
+         else:
+             new_line_count = 0
+             current_doc.append(line)
+         if new_line_count >= count_threshold or len(current_doc) > max_lines:
+             # A document is complete: join, clean, and buffer it
+             text = "\n".join(current_doc)
+             text = text.strip()
+             text = text.replace("\n\n", "\n")
+             text = touten_insert(text)
+             text = del_kaigyo(text)
+             current_doc = []
+             new_line_count = 0
+ 
+             """
+             if text!="":
+                 d=json.dumps({"text":text},ensure_ascii=False)
+                 with open(f"{out_dir}/{task_name}.jsonl","a") as f:
+                     f.write(f"{d}\n")
+             """
+             if text != "":
+                 # print(len(text))
+                 text_list.append(text)
+                 text_len += len(text)
+ 
+             # Write a parquet shard once roughly 10^8 characters have accumulated
+             if text_len > 10**8:
+                 # if len(text_list)>10:
+                 #     break
+ 
+                 df = pd.DataFrame(text_list, columns=["text"])  # .reset_index()
+                 df.to_parquet(f"{out_dir}/{task_name}_{cnt}.parquet")
+                 cnt += 1
+                 text_list = []
+                 text_len = 0
+                 # if cnt>100:
+                 #     break
+     # break
+     # Flush the remaining documents for this file
+     df = pd.DataFrame(text_list, columns=["text"]).reset_index()
+     df.to_parquet(f"{out_dir}/{task_name}_{cnt}.parquet")
+ 
+ # %%
+ text = text_list[1]
+ text = touten_insert(text)
+ text = del_kaigyo(text)
+ print(text)
+ 
+ # %%
+ text = del_kaigyo(text)
+ print(text)
+ 
+ 
+ # %%
+ # Sample line that ends mid-sentence (with "、")
+ t = "たくさんの「わたし」に見てもらいたいから、ダムの映画祭を、谷根千で、"
+ # end_filter(t)
+ 
+ # %%
+ df = pd.read_parquet("data/jap2010_parquet/00000001_0.parquet")
+ df
+ 
+ # %%
+ # huggingface-cli upload --repo-type=dataset hatakeyama-llm-team/japanese2010 .
+ 
+ # %%
+ # load test
+ dataset = load_dataset("hatakeyama-llm-team/japanese2010",
+                        streaming=True,
+                        split="train",
+                        )
+ 
+ # %%
+ cnt = 0
+ for data in dataset:
+     print(data["text"][:10])
+     cnt += 1
+     if cnt > 10:
+         break
+ 
+ # %%
+ 
+ 
+ # %%
+ with open("t.txt", "w") as f:
+     f.write(data["text"])
+ 
+ # %%