from docx import Document
import re
import requests
import json
import time
import os


# ======================== 匹配逻辑 ========================
# Sentence-terminal punctuation: Chinese mark -> expected English counterpart.
_ENDING_MAP = {
    '。': '.',
    '？': '?',
    '！': '!'
}

# Quote characters (ASCII plus full-width CJK quotes).  Prospectus DOCX
# imports sometimes convert English quotes into Chinese ones, so both
# families must be accepted when looking for a terminal mark + quote pair.
_QUOTES = {'"', "'", "“", "”", "‘", "’", "「", "」", "『", "』"}

# Common abbreviations whose trailing period must NOT end a sentence.
_ABBREVIATIONS = {
    'no', 'mr', 'mrs', 'ms', 'dr', 'prof', 'st', 'ave', 'blvd', 'co',
    'inc', 'ltd', 'etc', 'e.g', 'i.e', 'vs', 'p.s', 'a.m', 'p.m',
    'vol', 'fig', 'eq', 'ch', 'sec', 'ref', 'ex', 'al', 'ed', 'est',
    # Country/region abbreviations (ISO 2-letter codes)
    'us', 'uk', 'ca', 'au', 'nz', 'in', 'cn', 'jp', 'kr', 'de',
    'fr', 'it', 'es', 'nl', 'ru', 'br', 'mx', 'ar', 'za', 'eg',
}


def _next_significant_char(words, idx, quotes):
    """Return the first character after words[idx] that is neither whitespace
    nor a leading quote, or None when the paragraph ends first.

    Used to decide whether a period really terminates a sentence: a lowercase
    letter right after suggests an abbreviation or mid-sentence dot.
    """
    for candidate in words[idx + 1:]:
        token = candidate.strip()
        if not token:
            continue
        offset = 0
        while offset < len(token) and token[offset] in quotes:
            offset += 1
        if offset < len(token):
            return token[offset]
        # Token was entirely quotes — keep scanning the following tokens.
    return None


def _is_sentence_end(is_abbreviation, next_char):
    """True when a terminal punctuation mark actually closes a sentence."""
    if is_abbreviation:
        return False
    return next_char is None or not next_char.isalpha() or not next_char.islower()


def _split_chinese_sentences(paragraph):
    """Split a Chinese paragraph at terminal punctuation.

    Returns (endings, sentences): *endings* holds the English equivalent of
    each terminal mark in order; *sentences* additionally includes a trailing
    unterminated fragment, if any.
    """
    endings, sentences = [], []
    buffer = ""
    for char in paragraph:
        buffer += char
        if char in _ENDING_MAP:
            endings.append(_ENDING_MAP[char])
            sentences.append(buffer.strip())
            buffer = ""
    if buffer:
        sentences.append(buffer.strip())
    return endings, sentences


def _split_english_sentences(paragraph):
    """Split an English paragraph into sentences at terminal punctuation.

    Handles a terminal mark followed by a (possibly mis-encoded) closing
    quote, skips known abbreviations, and records any Chinese terminal marks
    found inside the English text as errors.

    Returns (endings, sentences, errors).
    """
    endings, sentences, errors = [], [], []
    english_marks = set(_ENDING_MAP.values())
    buffer = ""
    words = re.split(r'(\s+)', paragraph)  # keep the whitespace tokens
    for i, word in enumerate(words):
        buffer += word
        stripped = word.strip()
        if not stripped:
            continue

        # Case 1: terminal mark followed by a closing quote (English or
        # Chinese — prospectus imports sometimes mangle the quote style).
        if (len(stripped) >= 2 and stripped[-1] in _QUOTES
                and stripped[-2] in english_marks):
            ending_char = stripped[-2]
            clean_word = re.sub(r'[^\w]', '', stripped[:-2]).lower()
            next_char = _next_significant_char(words, i, _QUOTES)
            if _is_sentence_end(clean_word in _ABBREVIATIONS, next_char):
                print(f"句子结束（带引号）: {buffer.strip()}")
                endings.append(ending_char)
                sentences.append(buffer.strip())
                buffer = ""

        # Case 2: plain terminal mark with no trailing quote.
        elif stripped[-1] in english_marks:
            clean_word = re.sub(r'[^\w]', '', stripped).lower()
            next_char = _next_significant_char(words, i, _QUOTES)
            if _is_sentence_end(clean_word in _ABBREVIATIONS, next_char):
                print(f"句子结束: {buffer.strip()}")
                endings.append(stripped[-1])
                sentences.append(buffer.strip())
                buffer = ""

        # Chinese terminal marks inside English text are always errors.
        # BUGFIX: track each occurrence's own offset — the original used
        # str.index(), which always reported the FIRST occurrence, so a
        # repeated mark inside one word got a wrong position.
        prefix_len = sum(len(w) for w in words[:i])
        for char_idx, char in enumerate(word):
            if char in _ENDING_MAP:
                pos = prefix_len + char_idx + 1
                errors.append(
                    f"位置 {pos}: 中文结束符 '{char}' (应为 '{_ENDING_MAP[char]}')")

    if buffer:
        sentences.append(buffer.strip())
    return endings, sentences, errors


def _compare_paragraph(ch_endings, ch_sentences, en_endings, en_sentences, en_errors):
    """Compare the terminal-mark sequences of one paragraph pair.

    Returns (valid, diff_lines) where *diff_lines* is a list of indented
    human-readable discrepancy descriptions (empty when everything matches).
    """
    valid = True
    diff = []

    # Count mismatch: also show which side has surplus sentences.
    if len(ch_endings) != len(en_endings):
        valid = False
        diff.append(f"  结束符号数量不匹配: 中文 {len(ch_endings)} 个, 英文 {len(en_endings)} 个")
        min_sentences = min(len(ch_sentences), len(en_sentences))
        if len(ch_sentences) > min_sentences:
            diff.append("  中文多出句子:")
            for i in range(min_sentences, len(ch_sentences)):
                diff.append(f"    - {ch_sentences[i]}")
        elif len(en_sentences) > min_sentences:
            diff.append("  英文多出句子:")
            for i in range(min_sentences, len(en_sentences)):
                diff.append(f"    - {en_sentences[i]}")

    # Position-by-position comparison over the common prefix.
    min_len = min(len(ch_endings), len(en_endings))
    for i in range(min_len):
        if ch_endings[i] != en_endings[i]:
            valid = False
            diff.append(f"  位置 {i + 1}: 中文结束符 '{ch_endings[i]}' ≠ 英文结束符 '{en_endings[i]}'")
            ch_sentence = ch_sentences[i] if i < len(ch_sentences) else "（无对应句子）"
            en_sentence = en_sentences[i] if i < len(en_sentences) else "（无对应句子）"
            diff.append(f"    中文句子: {ch_sentence}")
            diff.append(f"    英文句子: {en_sentence}")

    # Surplus terminal marks on either side.
    if len(ch_endings) > min_len:
        valid = False
        extra = ch_endings[min_len:]
        diff.append(f"  中文多出 {len(extra)} 个结束符号: {extra}")
        for i in range(min_len, len(ch_sentences)):
            diff.append(f"    多余中文句子: {ch_sentences[i]}")
    if len(en_endings) > min_len:
        valid = False
        extra = en_endings[min_len:]
        diff.append(f"  英文多出 {len(extra)} 个结束符号: {extra}")
        for i in range(min_len, len(en_sentences)):
            diff.append(f"    多余英文句子: {en_sentences[i]}")

    # Chinese terminal marks found inside the English paragraph.
    if en_errors:
        valid = False
        diff.append("  英文段落中的中文结束符号错误:")
        for error in en_errors:
            # Map the reported offset back onto a sentence for context.
            match = re.search(r'位置 (\d+)', error)
            if match:
                pos = int(match.group(1))
                found = False
                for sent in en_sentences:
                    if pos <= len(sent):
                        diff.append(f"    {error} - 所在句子: {sent}")
                        found = True
                        break
                    pos -= len(sent) + 1  # +1 for the space between sentences
                if not found:
                    diff.append(f"    {error}")
            else:
                diff.append(f"    {error}")

    return valid, diff


# ======================== Matching logic ========================
def validate_sentence_endings_by_paragraph(chinese_text, english_text):
    """Validate that sentence-ending punctuation matches paragraph by paragraph.

    Both texts are split into non-empty lines (one paragraph per line).  For
    each aligned pair the sequences of terminal marks (。/？/！ vs ./?/!)
    must agree in count, order and kind; Chinese terminal marks inside the
    English text are reported as errors.

    Args:
        chinese_text: Chinese document text, paragraphs separated by newlines.
        english_text: English document text, paragraphs separated by newlines.

    Returns:
        (bool, str): overall validity and a human-readable report.
    """
    chinese_paragraphs = [p.strip() for p in chinese_text.split('\n') if p.strip()]
    english_paragraphs = [p.strip() for p in english_text.split('\n') if p.strip()]

    if len(chinese_paragraphs) != len(english_paragraphs):
        return False, f"段落数量不匹配: 中文 {len(chinese_paragraphs)} 段, 英文 {len(english_paragraphs)} 段"

    all_valid = True
    report = []

    for para_idx, (ch_para, en_para) in enumerate(zip(chinese_paragraphs, english_paragraphs)):
        # Debug traces kept from the original implementation.
        print(f"中文段落 {para_idx + 1}: {ch_para}")
        print(f"英文段落 {para_idx + 1}: {en_para}")

        ch_endings, ch_sentences = _split_chinese_sentences(ch_para)
        en_endings, en_sentences, en_errors = _split_english_sentences(en_para)

        print(f"中文句子数: {len(ch_sentences)}, 英文句子数: {len(en_sentences)}")
        print(f"中文结束符: {ch_endings}")
        print(f"英文结束符: {en_endings}")

        para_valid, diff = _compare_paragraph(
            ch_endings, ch_sentences, en_endings, en_sentences, en_errors)

        if not para_valid:
            all_valid = False
            report.append(f"段落 {para_idx + 1} 校验失败:")
            report.extend(diff)
            report.append("")  # blank line between failed paragraphs
        else:
            report.append(f"段落 {para_idx + 1} 校验通过")

    return all_valid, "\n".join(report)


def read_word_document(file_path):
    """Read a Word document and return its non-empty paragraphs, one per line."""
    document = Document(file_path)
    non_empty = (para.text for para in document.paragraphs if para.text.strip())
    return "\n".join(non_empty)


# ======================== 新增的DeepSeek重新分句功能 ========================
# 防止一些规则未涉及到的分句被误判。
# Guards against sentence splits that the rule-based logic mishandles.
def deepseek_resplit_paragraph(paragraph, language):
    """Re-split *paragraph* into sentences via the DeepSeek chat API.

    Args:
        paragraph: the paragraph text to re-split.
        language: "chinese" or "english"; selects the prompt and how the
            returned sentences are re-joined ("" for Chinese, " " for English).

    Returns:
        The re-joined paragraph on success; the original *paragraph* unchanged
        when the API call fails or the response cannot be parsed.

    Raises:
        ValueError: when the API key placeholder has not been replaced.
    """
    # Replace with your actual DeepSeek API key.
    API_KEY = "YOUR_DEEPSEEK_API_KEY"

    if API_KEY == "YOUR_DEEPSEEK_API_KEY":
        raise ValueError("请替换为您的DeepSeek API密钥")

    API_URL = "https://api.deepseek.com/v1/chat/completions"

    # Prompt per language (keep formatting; split even on faulty punctuation,
    # but never break abbreviations or proper nouns apart).
    if language == "chinese":
        prompt = f"请将以下中文段落按句子分割（保留原格式，有标点符号错误，也按照标点符号分割。但是缩写和专有名词不要拆开。），输出为JSON列表:\n\n{paragraph}"
    else:
        prompt = f"请将以下英文段落按句子分割（保留原格式，有标点符号错误，也按照标点符号分割。但是缩写和专有名词不要拆开。），输出为JSON列表:\n\n{paragraph}"

    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json"
    }

    payload = {
        "model": "deepseek-chat",
        "messages": [{"role": "user", "content": prompt}],
        "temperature": 0.1,
        "max_tokens": 4096
    }

    def _join(sentences):
        """Join a parsed sentence list back into a paragraph, or None."""
        if not isinstance(sentences, list):
            return None
        # Chinese sentences concatenate directly; English needs spaces.
        return "".join(sentences) if language == "chinese" else " ".join(sentences)

    try:
        print(f"调用DeepSeek API进行{language}段落重新分句...")
        # BUGFIX: a timeout keeps the validator from hanging forever on a
        # stalled connection.
        response = requests.post(API_URL, headers=headers,
                                 data=json.dumps(payload), timeout=60)
        response.raise_for_status()
        response_data = response.json()

        # BUGFIX: `content` was previously unbound when the response carried
        # no choices, producing a NameError masked by the broad handler below.
        content = None
        if response_data.get('choices'):
            content = response_data['choices'][0]['message']['content'].strip()
            print(content)

            # Preferred path: the whole reply is a JSON list.
            try:
                joined = _join(json.loads(content))
                if joined is not None:
                    return joined
            except json.JSONDecodeError:
                # Fallback: the model wrapped the list in prose — extract the
                # bracketed part and retry.
                match = re.search(r'\[.*\]', content, re.DOTALL)
                if match:
                    try:  # narrowed from the original bare `except:`
                        joined = _join(json.loads(match.group(0)))
                        if joined is not None:
                            return joined
                    except json.JSONDecodeError:
                        pass

        print(f"DeepSeek API返回非预期格式: {content}")
        return paragraph  # fall back to the original text on failure

    except Exception as e:
        print(f"DeepSeek API调用失败: {str(e)}")
        return paragraph


def validate_paragraph_with_resplit(ch_para, en_para, para_idx, original_diff):
    """Re-split one failed paragraph pair with the LLM and validate it again.

    The re-split texts are saved under ``resplit_results/`` for inspection.
    Returns ``(valid, report)``; when the second pass still fails, the report
    merges the original diagnostics with the re-split ones.
    """
    print(f"\n段落 {para_idx + 1} 原始校验失败，尝试重新分句...")

    # Make sure the output directory exists before writing anything.
    os.makedirs("resplit_results", exist_ok=True)

    # Re-split each side independently.
    print(f"重新分句中文段落 {para_idx + 1}...")
    new_ch = deepseek_resplit_paragraph(ch_para, "chinese")

    print(f"重新分句英文段落 {para_idx + 1}...")
    new_en = deepseek_resplit_paragraph(en_para, "english")

    # Persist both results, tagged with a timestamp to avoid collisions.
    stamp = int(time.time())
    saved = {}
    for tag, text in (("ch", new_ch), ("en", new_en)):
        path = f"resplit_results/para_{para_idx + 1}_{tag}_{stamp}.txt"
        with open(path, "w", encoding="utf-8") as handle:
            handle.write(text)
        saved[tag] = path

    print(f"重新分句后的中文段落已保存至: {saved['ch']}")
    print(f"重新分句后的英文段落已保存至: {saved['en']}")

    # Run the standard validation over the re-split pair.
    print(f"重新校验段落 {para_idx + 1}...")
    passed, second_report = validate_sentence_endings_by_paragraph(new_ch, new_en)

    if passed:
        return True, f"段落 {para_idx + 1} 重新分句后校验通过"

    # Still failing: combine original and re-split diagnostics.
    merged = [f"段落 {para_idx + 1} 原始错误:"]
    merged.extend(original_diff)
    merged.append("")
    merged.append(f"段落 {para_idx + 1} 重新分句后错误:")
    merged.append(second_report)
    return False, "\n".join(merged)


# ======================== 修改后的文档验证函数 ========================
def _extract_diff_lines(para_report):
    """Pull the indented diff lines out of a single-paragraph failure report."""
    diff_lines = []
    in_diff = False
    for line in para_report.split('\n'):
        if line.startswith("  位置") or line.startswith("  结束符号数量") or line.startswith("  英文段落中的"):
            in_diff = True
            diff_lines.append(line)
        elif in_diff and line.strip() == "":
            in_diff = False
        elif in_diff:
            diff_lines.append(line)
    return diff_lines


def validate_word_documents(chinese_doc_path, english_doc_path):
    """Validate sentence-ending punctuation across two parallel Word documents.

    Each aligned paragraph pair is checked with
    ``validate_sentence_endings_by_paragraph``; a failing paragraph gets a
    second chance after LLM-based re-splitting.

    Args:
        chinese_doc_path: path to the Chinese .docx file.
        english_doc_path: path to the English .docx file.

    Returns:
        (bool, str): overall validity and a summary-plus-detail report.
    """
    try:
        print("读取中文文档...")
        chinese_text = read_word_document(chinese_doc_path)
        print("读取英文文档...")
        english_text = read_word_document(english_doc_path)
    except Exception as e:
        return False, f"读取文档失败: {str(e)}"

    # One paragraph per non-empty line.
    chinese_paragraphs = [p.strip() for p in chinese_text.split('\n') if p.strip()]
    english_paragraphs = [p.strip() for p in english_text.split('\n') if p.strip()]

    if len(chinese_paragraphs) != len(english_paragraphs):
        return False, f"段落数量不匹配: 中文 {len(chinese_paragraphs)} 段, 英文 {len(english_paragraphs)} 段"

    all_valid = True
    report = []
    resplit_count = 0
    resplit_success = 0

    for para_idx, (ch_para, en_para) in enumerate(zip(chinese_paragraphs, english_paragraphs)):
        # Validate this single paragraph pair.
        para_valid, para_report = validate_sentence_endings_by_paragraph(ch_para, en_para)

        if not para_valid:
            # Retry the paragraph after LLM re-splitting, carrying the
            # original diagnostics along for the merged report.
            diff_lines = _extract_diff_lines(para_report)
            resplit_count += 1
            resplit_valid, resplit_report = validate_paragraph_with_resplit(
                ch_para, en_para, para_idx, diff_lines)

            report.append(resplit_report)  # merged report in either outcome
            if resplit_valid:
                resplit_success += 1
            else:
                all_valid = False
        else:
            report.append(f"段落 {para_idx + 1} 校验通过")

        # Visual separator between paragraphs.
        report.append("-" * 80)

    final_report = "\n".join(report)
    # Failed paragraphs = those that needed a re-split but still did not pass.
    # (Simplified from the original's equivalent but convoluted expression.)
    summary = [
        "=" * 50,
        "校验结果摘要:",
        f"总段落数: {len(chinese_paragraphs)}",
        f"失败段落数: {resplit_count - resplit_success}",
        f"重新分句尝试次数: {resplit_count}",
        f"重新分句成功次数: {resplit_success}",
        "=" * 50,
        ""
    ]

    return all_valid, "\n".join(summary) + final_report


# ======================== 使用示例 ========================
if __name__ == "__main__":
    # Paths to the Word documents (replace with the real ones).
    chinese_doc_path = "./simplifyAI test.zh-CN.docx"  # Chinese Word document
    english_doc_path = "./simplifyAI test.docx"  # English Word document

    # Run the validation, turning known failure modes into exit code 1.
    try:
        is_valid, message = validate_word_documents(chinese_doc_path, english_doc_path)
    except ValueError as ve:
        print(f"配置错误: {ve}")
        exit(1)
    except Exception as e:
        print(f"程序运行出错: {e}")
        exit(1)

    # Print the verdict and the detailed report.
    banner = "=" * 50
    print("\n" + banner)
    print("整体校验结果:", "通过" if is_valid else "失败")
    print(banner)
    print("详细报告:")
    print(message)

    # Persist the report alongside the script.
    report_file = "punctuation_validation_report.txt"
    with open(report_file, "w", encoding="utf-8") as f:
        f.write(message)
    print(f"\n报告已保存至: {report_file}")