Datasets:
lmqg
/

Modalities:
Text
Languages:
Japanese
ArXiv:
Libraries:
Datasets
License:
File size: 1,224 Bytes
0444c95
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
import re
from typing import List
import spacy

# Public API of this module. Must be a list/tuple of names: a bare string
# would make ``from module import *`` iterate its characters and fail.
# (SentSplit is expected to be defined later in this file.)
__all__ = ['SentSplit']


class JASplitter:
    """Japanese sentence splitter.

    Adapted from
    https://github.com/himkt/konoha/blob/master/konoha/sentence_tokenizer.py

    Splits text on the Japanese full stop (。), except when the period
    appears inside full-width parentheses （…） or corner brackets 「…」,
    where it is treated as part of the enclosing sentence.
    """

    PERIOD = "。"
    PERIOD_SPECIAL = "__PERIOD__"
    # Bracketed spans whose interior periods must NOT end a sentence.
    # NOTE: upstream konoha uses full-width parentheses （…）; the ASCII
    # pattern r"(.*?)" only ever matches the empty string under re.sub
    # and therefore protected nothing.
    PATTERNS = [re.compile(r"（.*?）"), re.compile(r"「.*?」")]

    @staticmethod
    def conv_period(item) -> str:
        """Mask every period inside a matched bracketed span.

        ``item`` is an ``re.Match``; the whole match is returned with each
        。 replaced by the placeholder so later splitting skips it.
        """
        return item.group(0).replace(JASplitter.PERIOD, JASplitter.PERIOD_SPECIAL)

    def __call__(self, document: str) -> List[str]:
        """Split ``document`` into a list of sentences.

        Empty lines/sentences are dropped; the returned sentences keep
        their trailing 。.
        """
        # Temporarily mask periods inside brackets so they survive the split.
        for pattern in JASplitter.PATTERNS:
            document = re.sub(pattern, self.conv_period, document)

        result = []
        for line in document.split("\n"):
            # Turn each remaining period into an explicit line break, then
            # split on those breaks to obtain sentence candidates.
            line = line.rstrip().replace("\r", "").replace("。", "。\n")
            for sentence in line.split("\n"):
                if not sentence:
                    continue
                # Restore the masked periods inside brackets.
                result.append(
                    sentence.replace(JASplitter.PERIOD_SPECIAL, JASplitter.PERIOD)
                )
        return result