# sentence_boundary_detection/examples/sent_tokenize/regex_chinese_sent_tokenize.py
# (source: HoneyTian's repository, commit 974e1e6)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import re
import time
def get_args():
    """Parse command-line options.

    Options:
        --text: the input string to split into sentences.
        --language: language tag for the input (informational only).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--text",
        type=str,
        default="讹言:“苍天已死,黄天当立;岁在甲子,天下大吉。”令人各以白土书“甲子”二字于家中大门上。",
    )
    arg_parser.add_argument("--language", type=str, default="chinese")
    return arg_parser.parse_args()
def chinese_sent_tokenize(text: str):
    """Split Chinese text into sentences with punctuation-based regex rules.

    A newline marker is inserted after each sentence terminator
    (。 ! ! ? ?, a six-dot ASCII ellipsis, or a two-character Chinese
    ellipsis ……), taking care that a closing quote stays attached to the
    sentence it ends.

    :param text: input text; may be empty.
    :return: list of sentence strings (a single-element list when no
        terminator is found).
    """
    # Single-character terminators not followed by a closing quote end a
    # sentence immediately.  NOTE: ASCII "!" added alongside ASCII "?" --
    # the original class covered one but not the other.
    text = re.sub(r"([。!!??])([^”’])", r"\1\n\2", text)
    # English ellipsis written as six dots.
    text = re.sub(r"(\.{6})([^”’])", r"\1\n\2", text)
    # Chinese ellipsis: two consecutive U+2026 characters.
    text = re.sub(r"(…{2})([^”’])", r"\1\n\2", text)
    # A closing quote ends the sentence only when preceded by a terminator;
    # place the break after the quote.  The rules above deliberately kept
    # closing quotes in place so this rule can still see them.
    text = re.sub(r"([。!!??][”’])([^,。!!??])", r"\1\n\2", text)
    # Semicolons, dashes and ASCII double quotes are deliberately ignored;
    # extend the character classes above if they are needed.
    # Strip any trailing whitespace/newline so split() adds no empty tail.
    text = text.rstrip()
    return text.split("\n")
def main():
    """Entry point: split --text into sentences and report the time taken."""
    args = get_args()
    started = time.time()
    sentences = chinese_sent_tokenize(args.text)
    elapsed = time.time() - started
    print(f"time cost: {elapsed}")
    print(sentences)
    return


if __name__ == "__main__":
    main()