import re

import docx
import typing
from typing import List
import os
import json
import codecs
from docx import Document
import random

# Debug accumulators: collect every distinct run style (color/font/size/bold)
# seen while parsing, so the corpus's formatting conventions can be inspected
# after a run. Populated by process_paragraph().
color_set = set()
font_set = set()
size_set = set()
bold_set = set()


def read_doc(fname):
    """Parse one .docx volume file into a list of Paragraph objects.

    Exegesis paragraphs (runs starting with "疏") are attached to the
    preceding paragraph instead of starting a new one.

    :param fname: path of the .docx file to read.
    :return: list of Paragraph objects in document order.
    """
    doc = Document(fname)
    # splitext instead of split('.') so a dot anywhere else in the path
    # (e.g. in a directory name) cannot truncate the volume title.
    volume_title = os.path.splitext(fname)[0]
    paragraphs = []
    for p in doc.paragraphs:
        texts, title, paragraph_type = process_paragraph(p, volume_title)
        if not texts:
            continue
        if paragraph_type == "exegesis" and paragraphs:
            # Guard: an exegesis appearing before any paragraph previously
            # raised IndexError on paragraphs[-1]; now it falls through and
            # starts a normal paragraph instead.
            paragraphs[-1].add_exegesis(texts)
        else:
            paragraphs.append(Paragraph(texts, volume_title))
    return paragraphs


class Text:
    """A single styled run of text with an inferred semantic role.

    Roles: "classic" (bold runs = original classic text), "exegesis"
    (runs opening with "疏"), "comment" (everything else).
    """

    def __init__(self, t: str, bold: typing.Optional[bool]):
        # `bold` comes from python-docx run.bold, which yields True, False,
        # or None (inherited style) — it is a tri-state bool, not a str.
        self.text = t.strip()
        self.bold = bold
        self.role = None
        self.__set_role()

    def __set_role(self):
        # Bold marks the classic text; a leading "疏" opens an exegesis;
        # anything else is commentary. None/False bold both fall through.
        if self.bold:
            self.role = "classic"
        elif self.text.startswith("疏"):
            self.role = "exegesis"
        else:
            self.role = "comment"


class Paragraph:
    """Groups Text runs into (classic text, commentary) pairs.

    Each "classic"/"title" run that follows accumulated classic text starts
    a new pair; non-classic runs are appended to the current pair's
    commentary. Exegesis strings are kept separately.
    """

    def __init__(self, texts: List[Text], title):
        self.title = title
        self.pairs = []
        self.exegesis = []
        current = {"text": [], "comment": []}
        for t in texts:
            if t.role in ("classic", "title"):
                # A classic run after accumulated classic text closes the
                # current pair and opens a fresh one.
                if current["text"]:
                    self.pairs.append(current)
                    current = {"text": [], "comment": []}
                current["text"].append(t.text)
            else:
                current["comment"].append(t.text)
        self.pairs.append(current)
        # Collapse the accumulated fragments into single strings.
        for entry in self.pairs:
            entry["text"] = "".join(entry["text"])
            entry["comment"] = "".join(entry["comment"])

    def add_exegesis(self, texts):
        """Append the text of each run to this paragraph's exegesis list."""
        self.exegesis.extend(t.text for t in texts)

    def to_json(self):
        """Return a JSON-serializable dict of this paragraph."""
        return {"title": self.title, "pairs": self.pairs, "exegesis": self.exegesis}


def process_paragraph(p, volume_name):
    """Convert a docx paragraph into Text runs plus a paragraph type.

    Volume-title paragraphs and blank paragraphs are skipped (returns a
    (None, None, None) triple). Run styles are recorded in the module-level
    debug sets as a side effect.

    :param p: a python-docx paragraph (its runs carry the style info).
    :param volume_name: passed through unchanged as the title.
    :return: (list of Text, volume_name, "classic"/"exegesis"/None)
    """
    title_re = re.compile(r"^卷[一二三四五六七八九十]+：[\u4e00-\u9fa5]+")
    if title_re.match(p.text) or not p.text.strip():
        return None, None, None
    texts = []
    paragraph_type = None
    for run in p.runs:
        # Record the styles seen across the corpus for later inspection.
        font_set.add(run.font.name)
        size_set.add(str(run.font.size))
        color_set.add(str(run.font.color.rgb))
        t = Text(run.text, run.bold)
        if t.role == "classic":
            # Any classic run marks the whole paragraph as classic.
            paragraph_type = "classic"
        elif paragraph_type is None and t.role == "exegesis":
            paragraph_type = "exegesis"
        if t.text:
            texts.append(t)
    return texts, volume_name, paragraph_type


def build_train_data_for_bge(path, write_file, negtive_sample_rate=2):
    """Build BGE retrieval training data from a paragraph jsonl file.

    Sentence-to-sentence pairs would leak information here, so only
    paragraph-to-paragraph pairs are generated (original note, translated).
    Each qualifying field pair is emitted in both query directions.

    :param path: input jsonl, one paragraph record per line.
    :param write_file: output jsonl in BGE {"query","pos","neg"} format.
    :param negtive_sample_rate: negatives sampled per training line.
    """

    def merge_paragraph(paragraph: dict, info_type: str):
        # Concatenate one field of a paragraph record into a single string.
        if info_type == "classic":
            return "".join(p["text"] for p in paragraph["pairs"])
        elif info_type == "comment":
            return "".join(p["comment"] for p in paragraph["pairs"])
        elif info_type == "exegesis":
            return "".join(paragraph["exegesis"])

    def write_line(s1, s2, s1_type, s2_type, neg_paragraphs, writer):
        # Emit both retrieval directions (s1 -> s2 and s2 -> s1).
        for query, pos, neg_type in ((s1, s2, s2_type), (s2, s1, s1_type)):
            record = {"query": query, "pos": [pos],
                      "neg": [merge_paragraph(json.loads(p), neg_type)
                              for p in neg_paragraphs]}
            writer.write(json.dumps(record, ensure_ascii=False) + "\n")

    # Close the input file deliberately (the original leaked the handle).
    with open(path, encoding="utf-8") as reader:
        lines = reader.readlines()
    with open(write_file, "w", encoding="utf-8") as writer:
        for line in lines:
            paragraph = json.loads(line)
            s1 = merge_paragraph(paragraph, "classic")
            s2 = merge_paragraph(paragraph, "comment")
            s3 = merge_paragraph(paragraph, "exegesis")
            # Exclude the current record so a positive can never appear among
            # its own negatives (the original sampled from all lines), and
            # cap the sample size so tiny files don't raise ValueError.
            candidates = [other for other in lines if other is not line]
            neg_paragraphs = random.sample(
                candidates, min(negtive_sample_rate, len(candidates)))
            if s1 and s2:
                write_line(s1, s2, "classic", "comment", neg_paragraphs, writer)
            if s1 and s3:
                write_line(s1, s3, "classic", "exegesis", neg_paragraphs, writer)
            if s2 and s3:
                write_line(s2, s3, "comment", "exegesis", neg_paragraphs, writer)


def write_json(paragraphs: List[Paragraph], out_path: str = "liji.jsonl"):
    """Serialize paragraphs as jsonl, one JSON object per line.

    :param paragraphs: objects exposing a to_json() -> dict method.
    :param out_path: destination file; defaults to the previously
        hard-coded "liji.jsonl" for backward compatibility.
    """
    with codecs.open(out_path, "w", "utf-8") as writer:
        for paragraph in paragraphs:
            writer.write(json.dumps(paragraph.to_json(), ensure_ascii=False) + "\n")

def build_test_data(dir_name="礼记注疏"):
    """Parse every document under dir_name and dump them to liji.jsonl.

    :param dir_name: directory holding the .docx volume files; defaults to
        the previously hard-coded corpus folder for backward compatibility.
    """
    paragraphs = []
    for fname in os.listdir(dir_name):
        paragraphs.extend(read_doc(os.path.join(dir_name, fname)))
    write_json(paragraphs)


# Script entry point: build the jsonl data from the docx corpus.
# (Original note: "click the green button in the gutter to run the script".)
if __name__ == '__main__':
    # The BGE training-data builder is run manually from liji.jsonl when needed.
    #build_train_data_for_bge("liji.jsonl", "liji_exegesis_bge_train.jsonl")
    build_test_data()
