import os
import subprocess
import tempfile
from PIL import Image
import io
import base64
import xml.etree.ElementTree as ET
from docx import Document
from docx.oxml.ns import qn
import zipfile
import shutil


class WordDocumentParser:
    """Parse Word documents (.doc / .docx) into Markdown text plus images.

    Legacy .doc files are first converted to .docx through LibreOffice
    (``soffice``).  Conversion output is written to a private temp
    directory; intermediate files are tracked and removed by
    ``cleanup_temp_files`` / ``cleanup``.
    """

    # Magic header of an OLE2 compound file (the legacy .doc container).
    _OLE_MAGIC = b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1'

    def __init__(self):
        # Private scratch directory for LibreOffice conversion output.
        self.temp_dir = tempfile.mkdtemp()
        self.temp_files_to_cleanup = []  # intermediate files to delete after parsing

    @staticmethod
    def _docx_name(path):
        """Return *path*'s basename with its extension swapped to ``.docx``.

        Uses ``splitext`` instead of ``str.replace('.doc', '.docx')`` so a
        name such as ``my.document.doc`` is not corrupted.
        """
        return os.path.splitext(os.path.basename(path))[0] + '.docx'

    def is_doc_file_corrupted(self, file_path):
        """Heuristically check whether a legacy .doc file is damaged.

        Returns True when the file cannot be read, is shorter than the
        8-byte header, or does not start with the OLE2 magic bytes.
        """
        try:
            with open(file_path, 'rb') as f:
                header = f.read(8)
            # A valid .doc is an OLE2 compound file; anything else is suspect.
            return header != self._OLE_MAGIC
        except OSError as e:
            print(f"文件读取错误: {e}")
            return True

    def is_docx_file_corrupted(self, file_path):
        """Check whether a .docx file is damaged.

        A healthy .docx is a zip archive containing the OOXML skeleton
        parts with a non-empty main document part.
        """
        required = ('[Content_Types].xml', '_rels/.rels', 'word/document.xml')
        try:
            with zipfile.ZipFile(file_path, 'r') as zip_ref:
                names = set(zip_ref.namelist())
                # Exact membership test; the previous substring check could
                # be satisfied by an unrelated entry containing the name.
                if any(req not in names for req in required):
                    return True
                with zip_ref.open('word/document.xml') as f:
                    if not f.read(100):  # empty main part => corrupt
                        return True
            return False
        except Exception as e:
            print(f".docx文件损坏检查错误: {e}")
            return True

    def repair_doc_file(self, file_path):
        """Try to repair a damaged .doc by converting it with LibreOffice.

        Returns the path of the produced .docx on success, else None.
        """
        print("尝试修复.doc文件...")
        try:
            # List form (shell=False): file_path cannot inject shell syntax.
            repair_cmd = ['soffice', '--headless', '--convert-to', 'docx',
                          '--outdir', self.temp_dir, file_path]
            result = subprocess.run(repair_cmd, capture_output=True, timeout=30)

            if result.returncode == 0:
                repaired_path = os.path.join(self.temp_dir,
                                             self._docx_name(file_path))
                if os.path.exists(repaired_path):
                    self.temp_files_to_cleanup.append(repaired_path)
                    return repaired_path
        except Exception as e:
            print(f"修复失败: {e}")

        return None

    def convert_doc_to_docx(self, doc_path):
        """Convert a .doc file to .docx with LibreOffice.

        Returns the converted file's path, or None when conversion fails.
        """
        try:
            output_dir = self.temp_dir
            conv_cmd = [
                'soffice', '--headless', '--convert-to', 'docx',
                '--outdir', output_dir, doc_path
            ]

            result = subprocess.run(conv_cmd, capture_output=True, timeout=60)

            if result.returncode == 0:
                docx_path = os.path.join(output_dir, self._docx_name(doc_path))
                if os.path.exists(docx_path):
                    self.temp_files_to_cleanup.append(docx_path)
                    return docx_path
                # LibreOffice may normalize the output name; fall back to
                # the first .docx found in the output directory.
                for file in os.listdir(output_dir):
                    if file.endswith('.docx'):
                        temp_docx_path = os.path.join(output_dir, file)
                        self.temp_files_to_cleanup.append(temp_docx_path)
                        return temp_docx_path

            # errors='replace' keeps the log line safe on non-UTF-8 output.
            print(f"转换失败: {result.stderr.decode(errors='replace')}")
            return None

        except Exception as e:
            print(f"转换过程中出错: {e}")
            return None

    def extract_hyperlinks_from_paragraph(self, paragraph):
        """Extract (text, url) pairs for the hyperlinks in one paragraph.

        Tries the run-level ``hyperlink`` attribute first (recent
        python-docx).  When that yields nothing — including older
        python-docx versions where runs simply lack the attribute, so the
        old AttributeError-only fallback never fired — the raw-XML parser
        is used instead.
        """
        hyperlinks = []

        try:
            for run in paragraph.runs:
                link = getattr(run, 'hyperlink', None)
                if link is not None:
                    link_text = run.text.strip()
                    if link_text:
                        link_url = getattr(link, 'target', None) or "#"
                        hyperlinks.append((link_text, link_url))
        except AttributeError as e:
            print(f"使用run.hyperlink方法失败: {e}")

        if not hyperlinks:
            # Fallback: parse the paragraph XML directly.
            try:
                hyperlinks = self.extract_hyperlinks_from_xml(paragraph)
            except Exception as xml_error:
                print(f"XML解析也失败: {xml_error}")

        return hyperlinks

    def extract_hyperlinks_from_xml(self, paragraph):
        """Extract hyperlinks by walking the paragraph's WordprocessingML.

        Resolves each ``w:hyperlink`` element's ``r:id`` against the part's
        relationship table to recover the target URL.
        """
        hyperlinks = []

        try:
            p_element = paragraph._p  # lxml element behind the paragraph
            for hyperlink_element in p_element.xpath('.//w:hyperlink'):
                try:
                    rel_id = hyperlink_element.get(qn('r:id'))
                    if not rel_id or not hasattr(paragraph.part, 'rels'):
                        continue
                    if rel_id not in paragraph.part.rels:
                        continue
                    rel = paragraph.part.rels[rel_id]
                    # Prefer the public target_ref attribute; keep the old
                    # private-attribute fallback for older python-docx.
                    link_url = (getattr(rel, 'target_ref', None)
                                or getattr(rel, '_target', None) or "#")

                    link_text = ''.join(
                        elem.text
                        for elem in hyperlink_element.xpath('.//w:t')
                        if elem.text)
                    if link_text:
                        hyperlinks.append((link_text, link_url))
                except Exception as e:
                    print(f"解析单个超链接时出错: {e}")
                    continue

        except Exception as e:
            print(f"XML解析超链接失败: {e}")

        return hyperlinks

    def extract_text_from_paragraph(self, paragraph):
        """Render one paragraph as Markdown inline text.

        Hyperlinks win: when the paragraph contains links, only the
        ``[text](url)`` forms are emitted.  Otherwise runs are emitted
        with bold/italic/underline markers applied.
        """
        text_content = ""
        hyperlinks = self.extract_hyperlinks_from_paragraph(paragraph)

        if hyperlinks:
            for link_text, link_url in hyperlinks:
                text_content += f"[{link_text}]({link_url}) "
        else:
            for run in paragraph.runs:
                run_text = run.text.strip()
                if run_text:
                    # Fix: bold previously emitted the text unchanged
                    # (f"{run_text}" carried no ** markers).
                    if getattr(run, 'bold', None):
                        run_text = f"**{run_text}**"
                    if getattr(run, 'italic', None):
                        run_text = f"*{run_text}*"
                    if getattr(run, 'underline', None):
                        run_text = f"__{run_text}__"

                    text_content += run_text + " "

        return text_content.strip()

    def extract_all_hyperlinks(self, doc):
        """Collect hyperlink (text, url) pairs from every paragraph."""
        all_hyperlinks = []

        for paragraph in doc.paragraphs:
            all_hyperlinks.extend(self.extract_hyperlinks_from_paragraph(paragraph))

        return all_hyperlinks

    def extract_images_from_docx(self, docx_file):
        """Extract embedded raster images from a .docx archive.

        Returns a list of dicts with keys ``name``, ``pil_image`` (a PIL
        Image), ``format``, ``size`` and ``mode``.
        """
        images = []
        image_exts = {'png', 'jpg', 'jpeg', 'gif', 'bmp'}

        try:
            with zipfile.ZipFile(docx_file, 'r') as zip_ref:
                image_files = [f for f in zip_ref.namelist()
                               if f.startswith('word/media/') and
                               f.rsplit('.', 1)[-1].lower() in image_exts]

                for img_path in image_files:
                    try:
                        img_data = zip_ref.read(img_path)
                        # BytesIO keeps the bytes alive for PIL's lazy loading.
                        pil_image = Image.open(io.BytesIO(img_data))
                        images.append({
                            'name': os.path.basename(img_path),
                            'pil_image': pil_image,
                            'format': pil_image.format,
                            'size': pil_image.size,
                            'mode': pil_image.mode,
                        })
                    except Exception as e:
                        print(f"处理图片 {img_path} 时出错: {e}")
                        continue

        except Exception as e:
            print(f"提取图片时出错: {e}")

        return images

    def table_to_markdown(self, table):
        """Render a python-docx table as a GitHub-style Markdown table.

        The first row is treated as the header.  Pipe characters inside
        cells are escaped (previously only body cells were); empty cells
        render as a single space.
        """
        if not table.rows:
            return ""

        def clean(cell):
            # Escape '|' so cell text cannot break the Markdown row.
            return cell.text.strip().replace('|', '\\|') or " "

        headers = [clean(c) for c in table.rows[0].cells]

        markdown_table = "\n"
        markdown_table += "| " + " | ".join(headers) + " |\n"
        markdown_table += "|" + "|".join(["---"] * len(headers)) + "|\n"

        for row in table.rows[1:]:
            row_data = [clean(c) for c in row.cells]
            if row_data:
                markdown_table += "| " + " | ".join(row_data) + " |\n"

        return markdown_table + "\n"

    def parse_docx_to_markdown(self, docx_path):
        """Parse a .docx into Markdown, images and metadata.

        Returns a dict with keys ``markdown`` (str), ``images`` (list of
        image-info dicts) and ``metadata``; returns None when parsing
        fails.
        """
        try:
            doc = Document(docx_path)
            markdown_content = []
            images = self.extract_images_from_docx(docx_path)

            # Collect every hyperlink once for the summary section.
            all_hyperlinks = self.extract_all_hyperlinks(doc)

            # Document title (when present) becomes the top-level heading.
            core_properties = doc.core_properties
            if getattr(core_properties, 'title', None):
                markdown_content.append(f"# {core_properties.title}\n")

            for paragraph in doc.paragraphs:
                if not paragraph.text.strip():
                    continue
                text_content = self.extract_text_from_paragraph(paragraph)
                if not text_content:
                    continue

                style_name = paragraph.style.name if getattr(paragraph, 'style', None) else ""
                if 'Heading' in style_name or '标题' in style_name:
                    # Map heading style names onto Markdown levels 1-3.
                    level = 1
                    if '2' in style_name:
                        level = 2
                    elif '3' in style_name:
                        level = 3
                    markdown_content.append(f"{'#' * level} {text_content}\n")
                else:
                    # Trailing two spaces force a Markdown line break.
                    markdown_content.append(f"{text_content}  \n")

            for table in doc.tables:
                markdown_content.append(self.table_to_markdown(table))

            # Image placeholders; actual pixels live in the result dict.
            if images:
                markdown_content.append("\n## 图片\n")
                for i, img in enumerate(images):
                    markdown_content.append(f"![图片{i+1}: {img['name']}](image_{i+1})  \n")

            if all_hyperlinks:
                markdown_content.append("\n## 超链接汇总\n")
                for text, url in all_hyperlinks:
                    markdown_content.append(f"- [{text}]({url})\n")

            return {
                'markdown': "\n".join(markdown_content),
                'images': images,  # list of dicts holding PIL Image objects
                'metadata': {
                    'title': getattr(core_properties, 'title', ''),
                    'author': getattr(core_properties, 'author', ''),
                    'created': getattr(core_properties, 'created', None),
                    'modified': getattr(core_properties, 'modified', None),
                    'hyperlink_count': len(all_hyperlinks),
                    'image_count': len(images)
                }
            }

        except Exception as e:
            print(f"解析.docx文件时出错: {e}")
            return None

    def parse_word_document(self, file_path):
        """Parse a .doc or .docx file; the main entry point.

        Returns the dict from ``parse_docx_to_markdown``, or a dict with
        an ``error`` key on failure.
        """
        print(f"开始解析文档: {file_path}")

        if not os.path.exists(file_path):
            return {"error": "文件不存在"}

        file_ext = os.path.splitext(file_path)[1].lower()

        try:
            if file_ext == '.doc':
                print("检测到.doc格式文件")

                if self.is_doc_file_corrupted(file_path):
                    print("文件可能已损坏，尝试修复...")
                    # Repair already converts to .docx; the old code ran a
                    # second, redundant LibreOffice conversion afterwards.
                    docx_path = self.repair_doc_file(file_path)
                    if not docx_path:
                        return {"error": "无法修复损坏的.doc文件"}
                else:
                    docx_path = self.convert_doc_to_docx(file_path)
                    if not docx_path:
                        return {"error": ".doc到.docx转换失败"}

                print(f"成功转换为: {docx_path}")
                result = self.parse_docx_to_markdown(docx_path)

                # Conversion output is no longer needed once parsed.
                self.cleanup_temp_files()

                return result

            elif file_ext == '.docx':
                print("检测到.docx格式文件")

                if self.is_docx_file_corrupted(file_path):
                    return {"error": ".docx文件可能已损坏"}

                return self.parse_docx_to_markdown(file_path)

            else:
                return {"error": f"不支持的文件格式 {file_ext}"}

        except Exception as e:
            return {"error": f"解析过程中发生错误: {str(e)}"}

    def cleanup_temp_files(self):
        """Delete every tracked intermediate file and reset the list."""
        for temp_file in self.temp_files_to_cleanup:
            try:
                if os.path.exists(temp_file):
                    os.remove(temp_file)
                    print(f"已删除临时文件: {temp_file}")
            except Exception as e:
                print(f"删除临时文件 {temp_file} 时出错: {e}")

        self.temp_files_to_cleanup = []

    def cleanup(self):
        """Release all temporary resources (files and the scratch dir)."""
        self.cleanup_temp_files()

        try:
            if os.path.exists(self.temp_dir):
                shutil.rmtree(self.temp_dir)
                print(f"已删除临时目录: {self.temp_dir}")
        except Exception as e:
            print(f"删除临时目录时出错: {e}")


def parse_word(file_path):
    """Parse a Word document (.doc or .docx) into Markdown plus images.

    Args:
        file_path: path to the Word document.

    Returns:
        dict with keys:
        - 'markdown': Markdown-formatted text content
        - 'images': list of dicts holding PIL image objects
        - 'metadata': document metadata
        or a dict with an 'error' key describing the failure.  Unlike the
        lower-level parser, this function never returns None.
    """
    parser = WordDocumentParser()

    try:
        # parse_word_document itself reports a missing file, so no extra
        # existence check is needed here.
        result = parser.parse_word_document(file_path)
        # The low-level parser returns None when .docx parsing fails;
        # normalize that to the documented error-dict contract so callers
        # can always test for 'error' membership safely.
        if result is None:
            result = {"error": "文档解析失败"}
        return result
    except Exception as e:
        return {"error": f"解析过程中发生异常: {str(e)}"}
    finally:
        # Always reclaim the temp directory, even on error paths.
        parser.cleanup()


if __name__ == "__main__":
    # 使用示例
    result = parse_word("path/to/your/example.doc")
    
    if 'error' not in result:
        print("解析成功!")
        print("Markdown内容长度:", len(result['markdown']))
        print("图片数量:", len(result['images']))
        print("文档标题:", result['metadata']['title'])
        
        # 处理图片 - 可以保存或进一步处理
        for i, img_info in enumerate(result['images']):
            print(f"图片 {i+1}: {img_info['name']} - 尺寸: {img_info['size']}")
            # 可以在这里对PIL图像进行操作，例如保存:
            # img_info['pil_image'].save(f"image_{i+1}.png")
    else:
        print("解析失败:", result['error'])