import codecs
import json
import re

class Paragraph:
    """A paragraph of classical text paired with sentence-level annotations.

    Attributes:
        classics: list of source-text segments (first element of each pair).
        annotations: annotation sentences, split on 。/； punctuation.
        annotation_ids: dict mapping each annotation sentence's position in
            ``annotations`` to the index of the source segment it annotates.
        volume_title / chapter_title: optional provenance metadata.
    """

    def __init__(self, pairs: list, volume_title=None, chapter_title=None):
        """Build from ``pairs`` of (classic_text, annotation_or_None)."""
        self.classics = [p[0] for p in pairs]
        annotation_list = [p[1] for p in pairs]
        assert len(self.classics) == len(annotation_list)
        self.annotations, self.annotation_ids = self.split_annotations(annotation_list)
        self.volume_title = volume_title
        self.chapter_title = chapter_title

    @staticmethod
    def split_annotations(annotation_list):
        """Split each annotation into sentences on runs of 。/；.

        Args:
            annotation_list: one annotation string (or None) per classic
                segment, aligned by index.

        Returns:
            (sentences, labels) where ``sentences`` is the flat list of
            non-empty sentence fragments and ``labels`` maps each sentence's
            position in ``sentences`` to the index of its source annotation.
        """
        punc_pat = re.compile(r"[。；]+")
        sentences = []
        source_ids = []
        # NOTE: renamed loop variable from `id` to avoid shadowing the builtin.
        for src_id, annotation in enumerate(annotation_list):
            # Treat None and empty string alike: nothing to split.
            if not annotation:
                continue
            for sent in punc_pat.split(annotation.strip()):
                if sent:
                    sentences.append(sent)
                    source_ids.append(src_id)
        # Index-keyed dict preserves the original return type.
        return sentences, dict(enumerate(source_ids))


def read_sishu(fname):
    """Load the sishu corpus (a JSON array of paragraph dicts).

    Skips volumes whose title contains 論語 (the Analects) and paragraphs
    with fewer than two pairs.

    Args:
        fname: path to the JSON file.

    Returns:
        list of Paragraph objects.
    """
    # Context manager + explicit encoding: the original leaked the file
    # handle and relied on the platform-default encoding for CJK text.
    with open(fname, encoding="utf-8") as f:
        data = json.load(f)
    dataset = []
    for paragraph in data:
        # Guard clauses instead of nested ifs; behavior unchanged.
        if "論語" in paragraph["volume_title"]:
            continue
        if len(paragraph["pairs"]) > 1:
            dataset.append(Paragraph(paragraph["pairs"],
                                     paragraph["volume_title"],
                                     paragraph["chapter_title"]))
    return dataset


def read_mengzi_liji(fname):
    """Load a JSON-lines corpus (mengzi / liji / xiaojing format).

    Each line is a JSON object with "title" and "pairs", where every pair
    has "text" and "comment" fields. Items with fewer than two pairs are
    dropped.

    Args:
        fname: path to the .jsonl file.

    Returns:
        list of Paragraph objects.
    """
    dataset = []
    # Context manager + explicit encoding: the original iterated an
    # unclosed `open(fname)` with the platform-default encoding.
    with open(fname, encoding="utf-8") as f:
        for line in f:
            item = json.loads(line)
            pairs = [(p["text"], p["comment"]) for p in item["pairs"]]
            if len(pairs) > 1:
                dataset.append(Paragraph(pairs, item["title"]))
    return dataset

def read_zhuangzi(fname, out_fname="zhuangzi.jsonl"):
    """Load the zhuangzi corpus and regroup comments per commentator.

    Each input paragraph carries a list of comment dicts whose "text" key is
    the base text and whose other keys are commentator names mapped to that
    commentator's annotation. For every paragraph and every commentator, a
    (text, annotation-or-None) pair list is built, dumped to ``out_fname``
    as JSON lines, and kept as a Paragraph when it yields more than one
    annotation sentence.

    Args:
        fname: path to the input JSON file.
        out_fname: path of the JSON-lines dump (default preserves the
            original hard-coded "zhuangzi.jsonl").

    Returns:
        list of Paragraph objects, one per (paragraph, commentator) with
        more than one annotation sentence.
    """
    with codecs.open(fname, encoding="utf-8") as f:
        data = json.load(f)

    # First pass: collect every commentator name (any key other than "text").
    author_names = set()
    for paragraph in data:
        for comment in paragraph["comments"]:
            for key in comment:
                if key != "text":
                    author_names.add(key)

    dataset = []
    # Context manager so the writer is closed even if Paragraph raises;
    # the original only closed it on the success path.
    with codecs.open(out_fname, "w", "utf-8") as writer:
        for paragraph in data:
            comments = paragraph["comments"]
            # Sorted for a reproducible output order: iterating the raw set
            # varied per run with string-hash randomization.
            author_pairs = {name: [] for name in sorted(author_names)}
            for comment in comments:
                for name in author_pairs:
                    # dict.get gives None for absent commentators, matching
                    # the original in/else branch.
                    author_pairs[name].append((comment["text"], comment.get(name)))
            for name, pairs in author_pairs.items():
                writer.write(json.dumps(pairs, ensure_ascii=False) + "\n")
                p = Paragraph(pairs, name)
                if len(p.annotation_ids) > 1:
                    dataset.append(p)
    return dataset


if __name__ == '__main__':
    # Report the number of usable paragraphs extracted from each corpus.
    print("zhuangzi", len(read_zhuangzi("data/zhuangzi.json")))
    print("sishu", len(read_sishu("data/sishu.json")))
    # The three jsonl corpora share one reader and one naming scheme.
    for corpus in ("mengzi", "liji", "xiaojing"):
        print(corpus, len(read_mengzi_liji("data/" + corpus + ".jsonl")))