WangZeJun committed on
Commit 936541a · verified · 1 Parent(s): d3b1ebc

Upload finetune_data.py with huggingface_hub

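For context, this commit message is the default that huggingface_hub generates for file uploads. A minimal sketch of how such an upload is typically done (the repo_id is a placeholder; the actual repository is not named on this page):

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="finetune_data.py",
    path_in_repo="finetune_data.py",
    repo_id="<namespace>/<repo>",  # placeholder, not taken from this commit
)
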
Files changed (1)
  1. finetune_data.py +130 -0
finetune_data.py ADDED
@@ -0,0 +1,130 @@
# coding=utf-8

import argparse
import json
import random
from ltp import LTP
from tqdm import tqdm

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", type=str, required=True)
    parser.add_argument("--output", type=str, required=True)
    parser.add_argument("--ltp_model", type=str, required=True)
    parser.add_argument("--basic_hanzi", type=str, default="confusion/basic_hanzi_2500.txt")
    parser.add_argument("--sound_confusion", type=str, default="confusion/sound_confusion.txt")
    parser.add_argument("--shape_confusion", type=str, default="confusion/shape_confusion.txt")
    # corruption-type sampling ratios; they must sum to 1.0 (checked in do_mask)
    parser.add_argument("--same_ratio", type=float, default=0.1)
    parser.add_argument("--repeat_ratio", type=float, default=0.2)
    parser.add_argument("--delete_ratio", type=float, default=0.2)
    parser.add_argument("--sound_ratio", type=float, default=0.4)
    parser.add_argument("--shape_ratio", type=float, default=0.1)
    # characters that are never corrupted (Chinese numerals by default)
    parser.add_argument("--whitelist", type=str, default="一二三四五六七八九十")
    parser.add_argument("--seed", type=int, default=42)
    args = parser.parse_args()
    return args

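A minimal invocation sketch under these flags (the file paths and model name below are hypothetical examples, not taken from the repository):

python finetune_data.py --input corpus.txt --output train.jsonl --ltp_model LTP/small
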
def isChinese(word):
    # True only if every character falls in the CJK Unified Ideographs range
    for ch in word:
        cp = ord(ch)
        if cp >= 0x4E00 and cp <= 0x9FA5:
            continue
        return False
    return True

def load_hanzi(path):
    # load the set of common hanzi, one character per line
    hanzi = set()
    with open(path, mode="r", encoding="utf-8") as handle:
        for line in handle:
            line = line.strip()
            assert len(line) == 1
            hanzi.update(line)
    return hanzi

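The expected format of the basic-hanzi file follows from the assert above: exactly one character per line, e.g. (these sample characters are arbitrary):

的
一
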
def load_confusion_set(path, hanzi):
    # each line: a character, whitespace, then a string of its confusion candidates
    confusion_set = {}
    with open(path, mode="r", encoding="utf-8") as handle:
        for line in handle:
            line = line.strip().split()
            if len(line) < 2:
                continue
            key, val = line[0], []
            for c in line[1]:
                # keep only common hanzi; drop duplicates and the key itself
                if c in hanzi and c not in val and c != key:
                    val.append(c)
            if val:
                confusion_set[key] = val
    return confusion_set

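The confusion file is read as "character, whitespace, candidate string"; any tokens after the second are ignored. As an illustration (the entry itself is invented), a line such as

白 百柏伯

would produce {"白": ["百", "柏", "伯"]}, minus any candidate that is not in the basic hanzi set.
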
def do_mask(sent, args):
    # word segmentation and POS tagging
    cws, pos = args.ltp.pipeline(sent, tasks=["cws", "pos"], return_dict=False)

    # pick a random word; retry once if it is not purely Chinese
    n = len(cws)
    i = random.choice(range(n))
    word = cws[i]
    if not isChinese(word):
        i = random.choice(range(n))
        word = cws[i]
        if not isChinese(word):
            return sent

    # sample the corruption type from cumulative ratio thresholds
    p = random.random()
    p1 = args.same_ratio
    p2 = p1 + args.repeat_ratio
    p3 = p2 + args.delete_ratio
    p4 = p3 + args.sound_ratio
    p5 = p4 + args.shape_ratio
    assert abs(p5 - 1) < 0.001
    if p < p1:
        # keep the sentence unchanged
        return sent
    if p < p2:
        # word redundancy: duplicate the chosen word
        cws[i] += word
        return ''.join(cws)
    if pos[i] in ['nh', 'ns']:
        # do not corrupt person names (nh) or place names (ns)
        return sent
    chars = list(word)
    k = random.choice(range(len(word)))
    c = chars[k]
    if c in args.whitelist:
        return sent
    if p < p3:
        # character deletion (skip single-character words)
        if len(word) < 2:
            return sent
        chars[k] = ''
        cws[i] = ''.join(chars)
        return ''.join(cws)
    if p < p4:
        # phonetically confusable substitution
        if c in args.sound_set:
            chars[k] = random.choice(args.sound_set[c])
    else:
        # visually confusable substitution
        if c in args.shape_set:
            chars[k] = random.choice(args.shape_set[c])
    cws[i] = ''.join(chars)
    return ''.join(cws)

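With the default ratios, the cumulative thresholds work out to 0.1, 0.3, 0.5, 0.9 and 1.0: a sentence is left unchanged 10% of the time, gets a duplicated word 20% of the time, loses a character 20% of the time, and receives a sound-alike or shape-alike substitution 40% and 10% of the time respectively. The assert simply checks that the five ratios sum to 1.
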
if __name__ == "__main__":
    args = parse_args()
    random.seed(args.seed)
    # set of common hanzi
    hanzi = load_hanzi(args.basic_hanzi)

    # confusion sets
    args.sound_set = load_confusion_set(args.sound_confusion, hanzi)
    args.shape_set = load_confusion_set(args.shape_confusion, hanzi)
    args.hanzi = list(hanzi)

    # LTP Chinese word-segmentation model
    args.ltp = LTP(args.ltp_model)

    output = open(args.output, mode="w", encoding="utf-8")
    with open(args.input, mode="r", encoding="utf-8") as handle:
        for line in tqdm(handle):
            sent = line.strip()
            if len(sent) < 4:
                continue
            source = do_mask(sent, args)
            output.write(json.dumps({"source": source, "target": sent}, ensure_ascii=False))
            output.write("\n")
    output.close()
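Each output line is one JSON record pairing the corrupted sentence with the original. An illustrative record (the sentence pair is invented, not drawn from any real corpus):

{"source": "今天天汽真好", "target": "今天天气真好"}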