import json
from docx import Document
from docx.shared import Pt
import re

class Paragraph:
    """A text paragraph with optional quotation candidates and LLM metadata."""

    def __init__(self, id, text, candidates=None) -> None:
        self.id = id
        self.text = text
        self.candidates = candidates
        self.llm_info = {}  # arbitrary key/value info attached during LLM processing

    def set_top_matches(self, top_matches):
        """Replace the candidate list with the given top matches."""
        self.candidates = top_matches

    def classify_paragraph(self):
        """Return 0 for speaker/question paragraphs ("...言：" / "問：..."), else 1."""
        pat = re.compile("^((.+者?言)|(問言?))：.+")
        if re.match(pat, self.text):
            return 0
        return 1

    def add_llm_info(self, k, v):
        """Attach one key/value pair of LLM-derived information."""
        self.llm_info[k] = v

    def extract_classic_candidate(self):
        """Extract quoted spans (「...」, brackets kept) from the text.

        Stores the result on ``self.candidates`` and also returns it.
        """
        pat = re.compile("「.+?」")
        candidates = re.findall(pat, self.text)
        self.candidates = candidates
        return candidates

    def to_dict(self):
        """Convert the serializable attributes into a plain dict."""
        return {
            'id': self.id,
            'text': self.text,
            'candidates': self.candidates
        }

    @classmethod
    def load_from_json(cls, file_path):
        """Create a Paragraph from a JSON file produced by to_dict().

        Bug fix: the constructor requires both ``id`` and ``text``; the
        original passed only ``text`` (as ``id``), raising TypeError.

        :param file_path: path to the JSON file
        :return: a populated Paragraph instance
        """
        with open(file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)
        return cls(data.get('id'), data['text'], data.get('candidates'))


def get_text_by_font(file_path, font_name=None, font_size=None):
    """
    Collect the text of all runs whose font matches the given filters.

    Bug fix: removed leftover debug ``print`` statements hardcoded to the
    string "法華經義疏"; also build the result with ``str.join`` instead of
    quadratic ``+=`` concatenation.

    :param file_path: DOCX file path
    :param font_name: font name filter; None matches any font name
    :param font_size: font size in points; None matches any size
    :return: concatenated text of every matching run
    """
    doc = Document(file_path)
    parts = []

    for paragraph in doc.paragraphs:
        for run in paragraph.runs:
            name_ok = font_name is None or run.font.name == font_name
            size_ok = font_size is None or run.font.size == Pt(font_size)
            if name_ok and size_ok:
                parts.append(run.text)

    return "".join(parts)


def get_text_by_style(file_path, style_name):
    """
    Collect the text of every paragraph whose style matches ``style_name``.

    :param file_path: DOCX file path
    :param style_name: paragraph style name to match exactly
    :return: matching paragraph texts, each followed by a newline
    """
    doc = Document(file_path)
    matched = [
        paragraph.text + '\n'
        for paragraph in doc.paragraphs
        if paragraph.style.name == style_name
    ]
    return "".join(matched)


def get_book_content(file_path, book_title):
    """
    Return the paragraph texts of the section whose "Heading 1" equals
    ``book_title``.

    Collection starts at the matching heading (the heading line itself is
    included) and stops at the next "Heading 1".

    :param file_path: DOCX file path
    :param book_title: exact text of the Heading 1 that opens the section
    :return: list of paragraph texts belonging to that section
    """
    doc = Document(file_path)
    collected = []
    in_section = False
    for para in doc.paragraphs:
        if para.style.name == "Heading 1":
            in_section = (para.text == book_title)
        if in_section:
            collected.append(para.text)
    return collected


def split_chapter(book_content):
    """
    Split a book's lines into chapters keyed by chapter-title lines.

    A chapter title is a line matching "...品第<Chinese numeral>".
    Lines appearing before the first title are ignored.

    Bug fix: the original never appended the final chapter after the loop,
    so the last chapter's content was always lost; with a preamble present
    the ``zip`` also mis-paired every title with the PREVIOUS chapter's
    lines. Keying lines directly by the current title fixes both.

    :param book_content: iterable of paragraph text lines
    :return: dict mapping chapter title -> list of that chapter's lines
    """
    pat = re.compile(r"^.+?品第[一二三四五六七八九十]+$")
    chapters = {}
    current = None
    for line in book_content:
        if pat.match(line):
            current = line
            chapters[current] = []
        elif current is not None:
            chapters[current].append(line)
    return chapters


def extract_align_text_from_doc():
    """
    Extract the two annotation books from the combined DOCX and dump them
    to books.json.

    Bug fix: the file handle passed to ``json.dump`` was never closed
    (resource leak, risk of unflushed output); use a context manager.

    :return: dict mapping book title -> list of paragraph texts
    """
    file_path = "./alignment/法華經義疏集合.docx"
    books = {
        "法華經義疏": get_book_content(file_path, book_title="法華經義疏"),
        "法華玄論": get_book_content(file_path, book_title="法華玄論"),
    }
    with open("books.json", "w", encoding="utf-8") as f:
        json.dump(books, f, ensure_ascii=False)
    return books


def build_annotation_book_content():
    """
    Build per-book candidate files for annotation.

    For each book: split into chapters, keep non-empty paragraphs that are
    NOT speaker/question paragraphs (``classify_paragraph() == 1``), extract
    their quoted classic candidates, and dump the result to
    ``<book_name>_candidates.json``.

    Bug fix: the output file handle was never closed; use a context manager.
    """
    books = extract_align_text_from_doc()
    for book_name, book_content in books.items():
        chapters = split_chapter(book_content)
        json_output = {}
        for chapter_name, paragraphs in chapters.items():
            chapter_entries = json_output.setdefault(chapter_name, [])
            for pid, paragraph in enumerate(paragraphs):
                if not paragraph:
                    continue
                para_object = Paragraph(pid, paragraph)
                if para_object.classify_paragraph() == 1:
                    candidates = para_object.extract_classic_candidate()
                    chapter_entries.append(para_object.to_dict())
                    if candidates:
                        print(candidates)
        with open(f"{book_name}_candidates.json", "w", encoding="utf-8") as f:
            json.dump(json_output, f, ensure_ascii=False)

def build_classic_book_content(file_path):
    """
    Extract the classic text from a DOCX, strip footnote markers such as
    "[12]", split into chapters, and dump them to classic.json.

    Bug fixes: the output file handle was never closed (use a context
    manager); the filename was a pointless f-string with no placeholders.
    Also hoists the footnote regex out of the loop.

    :param file_path: DOCX file path of the classic text
    :return: dict mapping chapter title -> list of that chapter's lines
    """
    doc = Document(file_path)
    footnote_pat = re.compile(r"\[[0-9]+\]")
    content = []
    for paragraph in doc.paragraphs:
        text = footnote_pat.sub("", paragraph.text)
        if text:
            content.append(text)
    chapters = split_chapter(content)
    with open("classic.json", "w", encoding="utf-8") as f:
        json.dump(chapters, f, ensure_ascii=False)
    return chapters


if __name__ == "__main__":
    # One-off: regenerate classic.json from the source classic DOCX.
    #build_classic_book_content("妙法蓮華經.docx")
    build_annotation_book_content()