"""
法律文档处理脚本  分块
从DOCX文件中提取文本内容，按照法律文档的结构进行分割和整理
支持编、分编、章、节、条等层级结构的识别和处理
"""

from docx import Document
import re
import pandas as pd
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from tools import getFiles
from tqdm import tqdm

def is_chinese_char_or_punct(ch):
    """
    Return True if *ch* should be kept when cleaning extracted text.

    Kept characters are CJK ideographs, CJK punctuation, ASCII digits,
    the plain space, and a small whitelist of common punctuation marks;
    everything else is considered PDF/DOCX extraction noise.

    Args:
        ch (str): a single character to classify.

    Returns:
        bool: True when the character is valid, False otherwise.
    """
    if ch.isdigit() or ch == ' ':
        return True
    if '\u4e00' <= ch <= '\u9fff':  # CJK Unified Ideographs
        return True
    if '\u3000' <= ch <= '\u303f':  # CJK Symbols and Punctuation
        return True
    # Explicit whitelist of additional full- and half-width punctuation.
    return ch in " （）【】：；“”‘’\"\'<>，。,.、/！"

def cleanText(text):
    """
    Clean a line of text by dropping invalid characters.

    Keeps CJK characters, digits, spaces, and common punctuation as decided
    by :func:`is_chinese_char_or_punct`; everything else is removed.

    Args:
        text (str): raw extracted text.

    Returns:
        str: the cleaned text.
    """
    return ''.join(ch for ch in text if is_chinese_char_or_punct(ch))

# ==================== Configuration ====================
directory = '../../data/laws'  # two levels up from datasets/scripts to the project root, then into data/laws
fileend = '.docx'        # file extension of the documents to process

# Collect all DOCX legal documents under the target directory
laws = getFiles(directory, fileend)
print(f"📚 找到 {len(laws)} 个法律文档待处理")

# ==================== Regex patterns ====================
# Recognize the hierarchical heading levels of Chinese legal documents;
# each pattern anchors at line start and requires trailing whitespace
# after the level keyword.
bian_re = re.compile(r"^第[零一二三四五六七八九十百千]+编\s+")      # 编 (part) level
fenbian_re = re.compile(r"^第[零一二三四五六七八九十百]+分编\s+")    # 分编 (sub-part) level; NOTE(review): numeral set stops at 百 while siblings allow 千 — confirm intentional
zhang_re = re.compile(r"^第[零一二三四五六七八九十百千]+章\s+")     # 章 (chapter) level
jie_re = re.compile(r"^第[零一二三四五六七八九十百千]+节\s+")      # 节 (section) level
tiao_re = re.compile(r"^第[零一二三四五六七八九十百千]+条\s+")     # 条 (article) level

print("🔍 开始处理法律文档...")
# ==================== Main processing loop ====================

def _make_chunk(law, headings, text):
    """Build one CSV row for a single article (条) chunk."""
    return {
        "文件名": law,
        "编": headings["编"],
        "分编": headings["分编"],
        "章": headings["章"],
        "节": headings["节"],
        "内容": text,
    }

# Heading regexes paired with the hierarchy key they set, ordered from the
# highest level (编) down to the lowest (节).
_HEADING_LEVELS = (
    (bian_re, "编"),
    (fenbian_re, "分编"),
    (zhang_re, "章"),
    (jie_re, "节"),
)
_LEVEL_ORDER = ("编", "分编", "章", "节")

for law in tqdm(laws, desc="处理法律文档"):
    print(f"\n📖 正在处理: {law}")

    # ---- DOCX text extraction ----
    # Accumulate paragraphs in a list (repeated string += is quadratic) and
    # drop page-number lines such as "- 1 -".
    lines = []
    try:
        doc = Document(f"{directory}/{law}{fileend}")
        total_paragraphs = len(doc.paragraphs)
        print(f"  📄 总段落数: {total_paragraphs}")

        for para_num, paragraph in enumerate(doc.paragraphs, 1):
            para_text = paragraph.text.strip()
            if para_text and not re.match(r"^\s*-\s*\d+\s*-\s*$", para_text):
                lines.append(para_text)
            if para_num % 100 == 0:  # periodic progress output
                print(f"    已处理 {para_num}/{total_paragraphs} 段")
    except Exception as e:
        # One unreadable document must not abort the whole batch.
        print(f"  ❌ DOCX读取失败: {e}")
        continue

    print(f"  📝 提取到 {len(lines)} 行文本")

    # ---- Structural chunking ----
    chunks = []            # one dict per article; becomes the per-file CSV
    save_parts = []        # plain-text pieces, joined for the .txt output
    headings = {"编": "", "分编": "", "章": "", "节": ""}
    current_text = ""      # body of the article currently being accumulated
    pending = False        # True while an article (条) is buffered but not yet flushed

    for raw_line in lines:
        if not raw_line:
            continue

        line = cleanText(raw_line)
        # Skip lines that are purely numeric (leftover page numbers etc.).
        if line.replace(" ", "").isdigit():
            continue

        matched_level = next(
            (name for regex, name in _HEADING_LEVELS if regex.match(line)),
            None,
        )
        # "附则" (supplementary provisions) acts as a chapter heading.
        if matched_level is None and line == "附则":
            matched_level = "章"

        if matched_level is not None:
            # A new heading closes any buffered article.
            if pending:
                chunks.append(_make_chunk(law, headings, current_text))
                save_parts.append(current_text)
                pending = False
            save_parts.append(line)
            headings[matched_level] = line
            # Entering a level resets every level below it.
            level_idx = _LEVEL_ORDER.index(matched_level)
            for lower in _LEVEL_ORDER[level_idx + 1:]:
                headings[lower] = ""
            current_text = ""
        elif tiao_re.match(line):
            # A new article also closes the previously buffered one.
            if pending:
                chunks.append(_make_chunk(law, headings, current_text))
                save_parts.append(current_text)
            pending = True
            current_text = line
        else:
            # Continuation line: append to the current article body.
            current_text += line

    # Flush the last buffered article. Guarding on (pending or current_text)
    # fixes the original's unconditional trailing append, which emitted a
    # spurious empty row when a document ended on a bare heading.
    if pending or current_text:
        chunks.append(_make_chunk(law, headings, current_text))
        save_parts.append(current_text)

    # ---- Per-document outputs ----
    df = pd.DataFrame(chunks)
    df.to_csv(f"{directory}/{law}.csv", index=False, encoding="utf-8-sig")

    with open(f"{directory}/{law}.txt", "w", encoding="utf-8") as f:
        f.write("".join(part + "\n" for part in save_parts))

# ==================== Merge all per-document CSVs into one table ====================
fileend = '.csv'
csv_files = [f"{directory}/{f}{fileend}" for f in getFiles(directory, fileend)]
dfs = [pd.read_csv(file) for file in tqdm(csv_files)]

# pd.concat([]) raises an opaque ValueError; guard so an empty directory
# produces a clear message instead of a traceback.
if dfs:
    df = pd.concat(dfs, ignore_index=True)
    df.to_csv("../../data/Laws_All.csv", index=False, encoding="utf-8-sig")
else:
    print("⚠️ 未找到可合并的CSV文件，跳过汇总输出")
