import traceback
import re

import olefile
from bs4 import BeautifulSoup
from docx import Document
import textract
from pydocx import PyDocX


def extract_text_from_docx(file_path: str) -> str:
    """Extract plain text from a .docx file.

    Returns every paragraph's text joined by newlines, or an empty
    string (after logging the traceback) when parsing fails.
    """
    try:
        document = Document(file_path)
        paragraph_texts = [paragraph.text for paragraph in document.paragraphs]
        return "\n".join(paragraph_texts)
    except Exception as e:
        print(traceback.format_exc())
        print(f"解析文件失败: {str(e)}")
        return ""


# def extract_text_from_doc(file_path: str) -> str:
#     with olefile.OleFileIO(file_path) as ole:
#         # 检查是否包含 Word 文档流
#         if not ole.exists('WordDocument'):
#             raise ValueError("不是有效的 Word .doc 文件")
#
#         # 提取 Word 文档文本
#         doc_stream = ole.openstream('WordDocument')
#         doc_data = doc_stream.read()
#
#         # 尝试检测文本编码
#         try:
#             # 先尝试 UTF-8
#             text = doc_data.decode('utf-8')
#         except UnicodeDecodeError:
#             # 再尝试 CP1252 (Windows 编码)
#             try:
#                 text = doc_data.decode('cp1252')
#             except UnicodeDecodeError:
#                 # 无法解码，返回原始字节的十六进制表示
#                 text = doc_data.hex()
#
#         # 简单清理文本
#         cleaned_text = text.replace('\x00', '').strip()
#
#         # 尝试使用 BeautifulSoup 进一步解析（如果包含 HTML 结构）
#         try:
#             soup = BeautifulSoup(cleaned_text, 'html.parser')
#             return soup.get_text('\n')
#         except Exception as e:
#             print(traceback.format_exc())
#             # 如果不是 HTML，直接返回清理后的文本
#             return cleaned_text

import struct
import zlib
import re
from collections import defaultdict


def extract_text_from_doc(file_path):
    """
    Parse a legacy binary .doc file using pure Python.

    :param file_path: path to the .doc file
    :return: extracted text, or an error message string on failure
    """
    try:
        with open(file_path, 'rb') as fh:
            raw = fh.read()

        # Verify the OLE compound-file magic bytes before going further.
        if not raw.startswith(b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1'):
            raise ValueError("这不是一个有效的.doc文件")

        # Parse the compound-file directory structure.
        structure = parse_file_structure(raw)

        # Locate the WordDocument stream, which holds the document body.
        stream = extract_stream(raw, structure, "WordDocument")
        if not stream:
            raise ValueError("未找到WordDocument流")

        # Decode the stream, then strip control characters and markers.
        return clean_text(extract_text(stream, structure))

    except Exception as e:
        return f"解析失败: {str(e)}"


def parse_file_structure(data):
    """Parse the OLE compound-file (CFB) header and directory entries.

    :param data: full file contents as bytes
    :return: dict with 'sector_size', 'short_sector_size' and 'entries'
             (list of {name, type, start_sector, size} directory records)
    """
    # The first 512 bytes are the compound-file header.
    header = data[:512]
    # Sector sizes are stored as powers of two at offsets 30 and 32.
    sector_size = 2 ** struct.unpack_from('<H', header, 30)[0]
    short_sector_size = 2 ** struct.unpack_from('<H', header, 32)[0]

    # Directory sectors: first sector number at offset 48, count at offset 40.
    # NOTE(review): per [MS-CFB] offset 40 holds the *FAT* sector count (and
    # is typically 0 only for the v4 directory-sector field); directory
    # sectors are also assumed to be contiguous here instead of following the
    # FAT chain — confirm against the spec for multi-sector directories.
    dir_sectors = []
    dir_sector_start = struct.unpack_from('<I', header, 48)[0]
    num_dir_sectors = struct.unpack_from('<I', header, 40)[0]

    for i in range(num_dir_sectors):
        # Sector 0 begins immediately after the 512-byte header.
        start = 512 + dir_sector_start * sector_size + i * sector_size
        dir_sectors.append(data[start:start + sector_size])

    # Each directory entry is a fixed 128-byte record.
    entries = []
    for sector in dir_sectors:
        for i in range(0, sector_size, 128):
            entry = sector[i:i + 128]
            # Heuristic skip of free/invalid entries based on the first byte
            # of the (UTF-16LE) name field.
            if entry[0] == 0 or entry[0] == 0xFF:
                continue  # free or invalid entry

            # Entry name: UTF-16LE, NUL-padded, up to 64 bytes.
            name = entry[:64].decode('utf-16le', errors='ignore').strip('\x00')
            # Object-type byte — presumably 2 = stream, 5 = root; verify vs [MS-CFB].
            entry_type = entry[66]
            start_sector = struct.unpack_from('<I', entry, 116)[0]
            size = struct.unpack_from('<I', entry, 120)[0]

            entries.append({
                'name': name,
                'type': entry_type,
                'start_sector': start_sector,
                'size': size
            })

    return {
        'sector_size': sector_size,
        'short_sector_size': short_sector_size,
        'entries': entries
    }


def extract_stream(data, file_structure, stream_name):
    """Extract the bytes of a named stream from the compound file.

    :param data: full file contents as bytes
    :param file_structure: result of parse_file_structure()
    :param stream_name: directory-entry name to look up (e.g. "WordDocument")
    :return: stream bytes; b'' if the stream is unallocated; None if the
             name is not found in the directory
    """
    sector_size = file_structure['sector_size']
    entries = file_structure['entries']

    # Find the directory entry for the requested stream name.
    stream_entry = next((e for e in entries if e['name'] == stream_name), None)
    if not stream_entry:
        return None

    stream_data = b''
    sector = stream_entry['start_sector']
    size = stream_entry['size']

    # 0xFFFFFFFE (ENDOFCHAIN) as the start sector means nothing allocated.
    if sector == 0xFFFFFFFE:
        return b''

    # Walk the sector chain until end-of-chain or the declared size is read.
    # NOTE(review): the "next sector" index is read from the last 4 bytes of
    # the current sector's own data; in the CFB spec chain links live in a
    # separate FAT, so this only works for files laid out that way — confirm
    # against [MS-CFB]. Other sentinel values (e.g. 0xFFFFFFFF = free) are
    # not handled and would be treated as real sector numbers.
    while sector != 0xFFFFFFFE and len(stream_data) < size:
        start = 512 + sector * sector_size
        end = start + sector_size
        chunk = data[start:end]

        # Trim the final chunk so we never exceed the declared stream size.
        if len(stream_data) + len(chunk) > size:
            chunk = chunk[:size - len(stream_data)]

        stream_data += chunk
        sector = struct.unpack_from('<I', data, start + sector_size - 4)[0]

    return stream_data[:size]


def extract_text(stream_data, file_structure):
    """Decode text from a WordDocument stream.

    :param stream_data: raw bytes of the WordDocument stream
    :param file_structure: parsed compound-file metadata (currently unused)
    :return: decoded text (UTF-16LE; undecodable bytes are dropped)
    """
    # NOTE(review): treating bit 0 of byte 10 as a "compressed" flag and
    # using zlib does not match the official .doc FIB layout; the heuristic
    # is kept as-is — verify against [MS-DOC] if fidelity matters.
    is_compressed = (stream_data[10] & 1) == 0

    if is_compressed:
        # Skip the header region before the text area.
        text_start = 512
        try:
            # Raw deflate stream (no zlib header), hence wbits=-15.
            decompressed = zlib.decompress(stream_data[text_start:], -15)
            return decompressed.decode('utf-16le', errors='ignore')
        except zlib.error:
            # BUG FIX: was a bare `except:` (which also swallows
            # SystemExit/KeyboardInterrupt); only zlib.decompress can raise
            # here, since decode(errors='ignore') never raises.
            # Not actually deflate data — fall back to direct decoding.
            return stream_data[text_start:].decode('utf-16le', errors='ignore')
    else:
        # Uncompressed document: decode the whole stream.
        return stream_data.decode('utf-16le', errors='ignore')


def clean_text(text):
    """Normalize extracted text: strip control characters, collapse
    whitespace, and drop bracket-delimited header/footer markers."""
    substitutions = (
        # ASCII control characters (tab/newline/CR survive this pass but
        # are collapsed by the whitespace rule below).
        (r'[\x00-\x08\x0B\x0C\x0E-\x1F]', ''),
        # Zero-width and BOM Unicode characters.
        (r'[\u200B-\u200D\uFEFF]', ''),
        # Runs of whitespace become a single space.
        (r'\s+', ' '),
        # Header/footer markers delimited by square brackets.
        (r'(\[|\])[^\]]*?(\[|\])', ''),
    )
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)
    return text.strip()


# Usage example
if __name__ == "__main__":
    file_path = "sample.doc"  # replace with the path to your .doc file
    # BUG FIX: the original called the undefined name `parse_doc`; the
    # extraction entry point defined above is `extract_text_from_doc`.
    text = extract_text_from_doc(file_path)
    print("提取的文本内容:")
    print("=" * 80)
    # Parenthesized for clarity: truncate output longer than 2000 chars.
    print((text[:2000] + "...") if len(text) > 2000 else text)
    print("=" * 80)
    print(f"提取字符数: {len(text)}")