import re
import codecs
import json


class Unit:
    """One original passage paired with the running commentary attached to it."""

    def __init__(self, text=None, comment=None):
        self.text = text        # the original passage
        self.comment = comment  # the commentary for the passage

    @staticmethod
    def find_lcsubstr(s1, s2):
        """Return (longest common substring of s1 and s2, its length)."""
        # table[a][b] = length of the common suffix of s1[:a] and s2[:b];
        # the extra zero row/column avoids border special cases.
        table = [[0] * (len(s2) + 1) for _ in range(len(s1) + 1)]
        best_len = 0  # longest match found so far
        best_end = 0  # index just past that match inside s1
        for a, ch1 in enumerate(s1):
            for b, ch2 in enumerate(s2):
                if ch1 == ch2:
                    table[a + 1][b + 1] = table[a][b] + 1
                    if table[a + 1][b + 1] > best_len:
                        best_len = table[a + 1][b + 1]
                        best_end = a + 1
        return s1[best_end - best_len:best_end], best_len

    @staticmethod
    def dp(text, comment):
        """Assign each commentary sentence to one original sentence.

        Dynamic programme over (original sentence i, commentary sentence j):
        pairing j with i scores the length of their longest common substring
        (and keeps i available for further comments), or i may be skipped.
        Returns a list of {"text": ..., "comment": [...]} dicts, one per
        original sentence, with comments in their original order.
        """
        mat = [[{"value": 0, "path": None} for _ in range(len(comment) + 1)]
               for _ in range(len(text) + 1)]
        sentences = [{"text": t, "comment": []} for t in text]
        for i in range(1, len(text) + 1):
            for j in range(1, len(comment) + 1):
                # score if comment j is matched to sentence i
                take = mat[i][j - 1]["value"] + Unit.find_lcsubstr(text[i - 1], comment[j - 1])[1]
                # score if sentence i is skipped
                skip = mat[i - 1][j]["value"]
                cell = mat[i][j]
                if take >= skip:  # ties prefer matching
                    cell["value"] = take
                    cell["path"] = (i, j - 1)
                else:
                    cell["value"] = skip
                    cell["path"] = (i - 1, j)
        # Walk the recorded path backwards, attaching comments as we go.
        i, j = len(text), len(comment)
        while i > 0 and j > 0:
            step = mat[i][j]["path"]
            if step == (i, j - 1):
                sentences[i - 1]["comment"].append(comment[j - 1])
            elif step == (i - 1, j):
                pass
            else:
                raise Exception("wrong path")
            i, j = step
        for entry in sentences:
            entry["comment"].reverse()
        return sentences

    def align(self):
        """Split text and comment into sentences on stop marks and align them."""
        stop = re.compile(u"。|！|？|；")
        source_parts = re.split(stop, self.text)

        def merge(parts):
            # Glue fragments of length <= 2 onto the previous sentence so
            # stray tails do not count as sentences of their own.
            merged = []
            for idx, part in enumerate(parts):
                if len(part) > 2 or idx == 0:
                    merged.append(part)
                else:
                    merged[-1] += part
            return merged

        comment_parts = re.split(stop, self.comment)
        return self.dp(merge(source_parts), merge(comment_parts))


def read_file_lv(fname):
    """Parse Lv's commentary file and dump sentence alignments to JSON.

    Line format: lines starting with ``呂注`` carry commentary; lines
    starting with a digit begin a new original passage (numbered "12. ...");
    any other non-empty line continues the current commentary.  Writes the
    aligned sentences to ``lv_comment_align.json``.
    """
    units = []
    align_sentences = []
    unit = Unit()
    # leading "12. "-style numbering on original-text lines
    num_prefix = re.compile(r"[0-9]+\. ")
    with codecs.open(fname, encoding="utf-8-sig") as fin:
        for line in fin:
            text = line.strip()
            if len(text) == 0:
                continue
            if text[:2] == u"呂注":
                if unit.comment is None:
                    unit.comment = text[2:]
                else:
                    # a second commentary line before a new passage started
                    print(unit.comment)
                    raise Exception("wrong parsing")
            elif text[0].isdigit():
                # A numbered line starts a new passage: flush the finished unit.
                if unit.text is not None:
                    assert unit.comment is not None, (text, unit.text)
                    align_sentences.append(unit.align())
                    units.append(unit)
                    unit = Unit()
                unit.text = re.sub(num_prefix, "", text)
            elif unit.text is not None and unit.comment is not None:
                unit.comment += text  # continuation of the commentary
            else:
                print(text, unit.text, unit.comment)
                raise Exception("wrong parsing")
    # flush the last unit
    if unit.text is not None:
        align_sentences.append(unit.align())
        units.append(unit)
    with codecs.open("lv_comment_align.json", "w", encoding="utf8") as fout:
        json.dump(align_sentences, fout, ensure_ascii=False, indent=4)


class Paragraph:
    """A paragraph of the standard edition plus comments attached to it."""

    def __init__(self):
        self.sentences = list()      # per-line sentence dicts, later enriched with comments
        self.sub_sentences = list()  # finer splits for lines with several 【N】 markers
        self.text = None             # full paragraph text, joined from sentences

    def dict(self):
        """Serialize to the {"paragraph": ..., "comments": ...} JSON shape."""
        return {"paragraph": self.text, "comments": self.sentences}


class GuoParagraph:
    """A paragraph of Guo's annotated edition, split on 【...】 markers.

    ``sentences`` holds the text fragment preceding each marker; ``comments``
    is filled by the file parser and paired with the sentences by check().
    """

    def __init__(self, paragraph):
        self.text = paragraph
        self.sentences = []
        self.comments = []
        self.clean()
        self.split()

    def clean(self):
        """Strip editorial [杭N] markers from the raw paragraph."""
        # raw string: "\[" in a plain literal is an invalid escape sequence
        p = re.compile(r"\[杭[0-9]+\]")
        self.text = re.sub(p, "", self.text)

    def split(self):
        """Split the text at 【...】 markers, one sentence per marker.

        Punctuation stranded at the head of a fragment is moved back onto the
        previous fragment, and the tail after the final marker is folded into
        the last sentence.
        """
        p = re.compile(u"【.+?】")
        sentences = re.split(p, self.text)
        for i in range(1, len(sentences)):
            if len(sentences[i]) > 0 and sentences[i][0] in u"。，！？；：、":
                sentences[i - 1] += sentences[i][0]
                sentences[i] = sentences[i][1:]
        if len(sentences) >= 2:
            sentences[-2] = sentences[-2] + sentences[-1]
            self.sentences = sentences[:-1]
        else:
            # No markers at all: keep the whole text as the single sentence
            # (previously this input raised IndexError on sentences[-2]).
            self.sentences = sentences

    def check(self):
        """Pair each sentence with its comment and strip 【N】 numbering."""
        assert len(self.sentences) == len(self.comments), (self.sentences, self.comments)
        pattern = re.compile(u"【[一二三四五六七八九０]+?】")
        self.text = re.sub(pattern, "", self.text)
        for i in range(len(self.sentences)):
            self.comments[i].update({"text": self.sentences[i]})

    def dict(self):
        """Serialize to the {"paragraph": ..., "comments": ...} JSON shape."""
        s = {"paragraph": self.text}
        s.update({"comments": self.comments})
        return s


def read_file_guo(fname):
    """Parse Guo's annotated file and dump paragraph alignments to JSON.

    Recognises three kinds of lines: plain text (starts a new paragraph),
    ``【N】【注】...`` commentary (a new comment entry), and
    ``【疏】/【校】/【釋文】`` annotations — numbered ones start a fresh comment
    entry, unnumbered ones extend the latest one.  Writes the result to
    ``guo_comment_align.json``.
    """
    paragraphs = []
    comment_pattern = re.compile(u"^【[一二三四五六七八九０]+】【注】.+")
    annotation_pattern = re.compile(u"^(【[一二三四五六七八九０]+】)?【(疏|校|(釋文))】.+")
    numbered_annotation = re.compile(u"^(【[一二三四五六七八九０]+】)【(疏|校|(釋文))】.+")
    # patterns hoisted out of the loop (originally recompiled per line)
    paragraph_noise = re.compile(r"([0-9]+\.)|(（.+?）)")
    comment_prefix = re.compile(u"【[一二三四五六七八九０]+?】【注】")
    number_prefix = re.compile(u"【[一二三四五六七八九０]+】")
    kind_prefix = re.compile(u"【(疏|校|(釋文))】")
    optional_prefix = re.compile(u"(【[一二三四五六七八九０]+】)?【(疏|校|(釋文))】")
    with codecs.open(fname, encoding="utf-8-sig") as fin:
        for line in fin:
            text = line.strip()
            if len(text) == 0:
                continue
            if not text.startswith("【"):  # original text: a new paragraph
                paragraphs.append(GuoParagraph(re.sub(paragraph_noise, "", text)))
            elif re.match(comment_pattern, text):  # 注 commentary
                paragraphs[-1].comments.append({u"注": re.sub(comment_prefix, "", text)})
            elif re.match(annotation_pattern, text):  # 疏 / 校 / 釋文 annotation
                if re.match(numbered_annotation, text):
                    # numbered annotation: starts a fresh comment entry;
                    # text[1] (疏/校/釋) becomes the dict key after the
                    # number prefix is stripped
                    text = re.sub(number_prefix, "", text)
                    paragraphs[-1].comments.append({text[1]: re.sub(kind_prefix, "", text)})
                else:
                    # unnumbered annotation: attaches to the latest entry
                    paragraphs[-1].comments[-1][text[1]] = re.sub(optional_prefix, "", text)
    for p in paragraphs:
        p.check()
    paragraphs = [p.dict() for p in paragraphs]
    with codecs.open("guo_comment_align.json", "w", encoding="utf-8") as fout:
        json.dump(paragraphs, fout, ensure_ascii=False, indent=4)


def my_subset(t1, t2, m_num=5):
    """Fuzzy subset test: is (nearly) every character of t1 present in t2?

    True when the characters of t1 are a subset of those of t2, or when at
    most min(len(smaller set) // 3, m_num) characters are missing — the slack
    tolerates textual variants between editions.
    """
    chars1 = set(t1)
    chars2 = set(t2)
    if chars1 <= chars2:
        return True
    tolerance = min(min(len(chars1), len(chars2)) // 3, m_num)
    return len(chars1 - chars2) <= tolerance


def read_standard(fname):
    """Read the standard edition, splitting into paragraphs on blank lines.

    Each non-empty line becomes one sentence (with 【N】 markers stripped);
    lines carrying more than one marker are additionally split into
    sub-sentences at the markers.  Returns a list of Paragraph objects with
    ``text`` set to the joined sentence text.
    """
    paragraphs = []
    paragraph = Paragraph()
    marker = re.compile(u"【[一二三四五六七八九０]+?】")
    with codecs.open(fname, encoding='utf-8-sig') as fin:
        for line in fin:
            text = line.strip()
            if len(text) == 0:
                # a blank line terminates the current paragraph
                if len(paragraph.sentences) > 0:
                    paragraphs.append(paragraph)
                    paragraph = Paragraph()
                continue
            annotations = re.findall(marker, text)
            if len(annotations) > 1:
                sub_sentences = re.split(marker, text)
                # move punctuation stranded at a fragment head back onto
                # the previous fragment
                for i in range(1, len(sub_sentences)):
                    if len(sub_sentences[i]) > 0 and sub_sentences[i][0] in u"。，！？；：、":
                        sub_sentences[i - 1] += sub_sentences[i][0]
                        sub_sentences[i] = sub_sentences[i][1:]
            else:
                sub_sentences = [re.sub(marker, "", text)]
            paragraph.sub_sentences.append({"text": sub_sentences})
            paragraph.sentences.append({"text": re.sub(marker, "", text)})
    # flush the last paragraph (files need not end with a blank line)
    if len(paragraph.sentences) > 0:
        paragraphs.append(paragraph)
    for par in paragraphs:  # renamed: the original loop shadowed the regex `p`
        par.text = "".join([s["text"] for s in par.sentences])
    return paragraphs


def remove_punctuation(text):
    """Strip "12."-style numbering, 【N】 markers, punctuation, and spaces.

    Used to normalise sentences before fuzzy comparison.  The pattern is a
    raw string: ``"\."`` in a plain literal is an invalid escape sequence.
    """
    punctuation = re.compile(r"[0-9]+\.|(【[一二三四五六七八九０]+】)|[，。：“”？！；、 ]+")
    return re.sub(punctuation, "", text)


def align_guo(standard, guo):
    """Merge Guo's comments into the standard paragraphs in place.

    ``standard`` is a list of Paragraph objects from read_standard;
    ``guo`` is the JSON list written by read_file_guo
    ({"paragraph": ..., "comments": [...]} dicts; the entry at index 0 is
    skipped — presumably a title, TODO confirm against the input file).
    Comment dicts that find a matching sentence have their "text" key popped
    and their remaining keys merged into that sentence.  Returns ``standard``.
    """
    punctuation = re.compile("[。：？！，]+")
    i = 0
    j = 1  # skip the title entry
    while i < len(standard) and j < len(guo):
        raw_guo_paragraph_text = remove_punctuation(guo[j]["paragraph"])
        while my_subset(raw_guo_paragraph_text, remove_punctuation(standard[i].text)):
            # Guo's paragraph lies inside the current standard paragraph
            for i1 in range(len(standard[i].sentences)):
                for j1 in range(len(guo[j]["comments"])):
                    if "text" not in guo[j]["comments"][j1]:
                        continue  # already attached to an earlier sentence
                    text = guo[j]["comments"][j1]["text"]
                    # If only the last fragment matches, the comment is taken
                    # to belong to this sentence; keep the previous fragment
                    # too when the last one is very short (<= 5 chars).
                    segments = re.split(punctuation, text)
                    if len(segments[-1]) <= 5 and len(segments) >= 2:
                        text = "".join([segments[-2], segments[-1]])
                    else:
                        text = segments[-1]
                    if remove_punctuation(text) in remove_punctuation(standard[i].sentences[i1]["text"]):
                        guo[j]["comments"][j1].pop("text")
                        standard[i].sentences[i1].update(guo[j]["comments"][j1])
            j += 1
            if j >= len(guo):
                break
            raw_guo_paragraph_text = remove_punctuation(guo[j]["paragraph"])
        i += 1
    # both sequences must be fully consumed or the alignment drifted
    assert i == len(standard) and j == len(guo), (i, j, len(standard), len(guo))
    return standard


def align_lv(standard, lv):
    """Merge Lv's comments into the standard paragraphs and dump to JSON.

    ``lv`` is a list of paragraphs, each a list of {"text", "comment"}
    sentence dicts (flattened before use).  Walks both sequences in lockstep,
    matching each Lv sentence either to a whole standard paragraph or to an
    individual sentence within one, using the fuzzy ``my_subset`` test.
    Writes the merged result to ``aligned_comment.json``.
    """
    punctuation = re.compile(r"([0-9]+\.)|(【[一二三四五六七八九０]+】)|([，。：“”？！；、]+)")
    flat_lv = []
    for part in lv:
        flat_lv.extend(part)
    lv = flat_lv
    i = 0
    j = 0
    while i < len(standard) and j < len(lv):
        paragraph = standard[i]
        lv_raw_text = remove_punctuation(lv[j]["text"])
        raw_paragraph_text = remove_punctuation(paragraph.text)
        if my_subset(raw_paragraph_text, lv_raw_text):
            # The whole standard paragraph fits inside one Lv sentence.
            # NOTE(review): j is deliberately not advanced here, so the same
            # Lv sentence is compared against the next paragraph as well —
            # confirm this is intended.
            if len(paragraph.sentences) == 1:
                paragraph.sentences[0][u"吕注"] = "。".join(lv[j]["comment"])
            else:
                new_comment = {"text": lv[j]["text"], u"吕注": "。".join(lv[j]["comment"])}
                paragraph.sentences.append(new_comment)
        else:
            assert my_subset(lv_raw_text, raw_paragraph_text), (
                lv_raw_text, raw_paragraph_text, set(lv_raw_text).difference(set(raw_paragraph_text)))
            # Resume scanning below sentences already matched, not from 0.
            last_match_sentence_id = 0
            while my_subset(lv_raw_text, raw_paragraph_text):
                flag = False
                for k in range(last_match_sentence_id, len(paragraph.sentences)):
                    sentence = paragraph.sentences[k]
                    raw_sentence_text = re.sub(punctuation, "", sentence["text"])
                    if my_subset(lv_raw_text, raw_sentence_text):
                        # The Lv sentence found its home; move to the next one.
                        flag = True
                        last_match_sentence_id = k
                        if len(lv[j]["comment"]) > 0:
                            sentence[u"吕注"] = "。".join(lv[j]["comment"])
                        break
                    elif my_subset(raw_sentence_text, lv_raw_text, 1):
                        # The Lv sentence covers this standard sentence and
                        # more: adopt the longer Lv text.
                        flag = True
                        last_match_sentence_id = k + 1
                        if len(lv[j]["comment"]) > 0:
                            sentence["text"] = lv[j]["text"]
                            sentence[u"吕注"] = "。".join(lv[j]["comment"])
                        break
                if not flag:
                    # No sentence in this paragraph matches well; check
                    # whether the Lv sentence fits the NEXT paragraph better
                    # (fewer missing characters, or an exact substring hit
                    # there but not here), and if so move on.
                    if i + 1 < len(standard) and \
                            my_subset(lv_raw_text, remove_punctuation(standard[i + 1].text)) and \
                            (len(set(lv_raw_text).difference(set(raw_paragraph_text))) > \
                             len(set(lv_raw_text).difference(set(remove_punctuation(standard[i + 1].text)))) or
                             (remove_punctuation(standard[i + 1].text).find(lv_raw_text) >= 0 >
                              raw_paragraph_text.find(lv_raw_text))):
                        break
                    elif len(lv[j]["comment"]) > 0:
                        paragraph.sentences.append({"text": lv[j]["text"], u"吕注": "。".join(lv[j]["comment"])})
                j += 1
                if j >= len(lv):
                    break
                lv_raw_text = re.sub(punctuation, "", lv[j]["text"])
        i += 1
    # both sequences must be fully consumed or the alignment drifted
    assert i == len(standard) and j == len(lv), (i, j, len(standard), len(lv))
    aligned_result = []
    for p in standard:
        aligned_result.append(p.dict())
    with codecs.open("aligned_comment.json", "w", encoding="utf-8") as fout:
        json.dump(aligned_result, fout, ensure_ascii=False, indent=4)


if __name__ == "__main__":
    # Build the per-source alignment JSON files, then merge both commentary
    # sets into the standard edition.
    read_file_lv("lvzhu.txt")
    read_file_guo("guozhu.txt")  # the first line and the last few lines of the input must be removed beforehand
    standard = read_standard("standard.txt")
    standard = align_guo(standard, json.load(codecs.open("guo_comment_align.json", encoding="utf8")))
    align_lv(standard, json.load(codecs.open("lv_comment_align.json", encoding="utf8")))
