import base64
import email
import hashlib
import io
import os
import re
import tempfile
from email import policy
from email.parser import BytesParser
from pathlib import Path

import chardet
import pandas as pd
from bs4 import BeautifulSoup
from PIL import Image

class DocumentParser:
    """Parse HTML, MHTML and plain-text documents into a uniform structure.

    Each ``parse_*`` method returns a dict with the keys:
        text:     extracted plain text (str)
        images:   mapping of image name -> PIL.Image
        tables:   list of Markdown-formatted table strings
        links:    list of {'text': ..., 'url': ...} dicts
        metadata: format-specific information (type, encoding, ...)
    """

    def __init__(self):
        # Paths of temporary files to remove in cleanup().
        # NOTE(review): nothing in this class currently registers files here,
        # so cleanup() is a no-op unless callers append paths themselves.
        self.temp_files = []

    def __del__(self):
        """Best-effort cleanup of temporary files on garbage collection."""
        self.cleanup()

    def cleanup(self):
        """Delete all registered temporary files, ignoring per-file failures."""
        for temp_file in self.temp_files:
            try:
                if os.path.exists(temp_file):
                    os.remove(temp_file)
            except Exception as e:
                print(f"删除临时文件失败 {temp_file}: {e}")
        self.temp_files = []

    def check_file_integrity(self, file_path):
        """Basic sanity checks: the file exists, is non-empty and readable.

        Returns:
            tuple[bool, str]: (ok, human-readable message).
        """
        try:
            if not os.path.exists(file_path):
                return False, "文件不存在"

            if os.path.getsize(file_path) == 0:
                return False, "文件为空"

            # Verify readability. The original read the entire file to
            # compute an MD5 digest that was never used; reading one chunk
            # gives the same readability check without loading large files
            # into memory.
            with open(file_path, 'rb') as f:
                f.read(1024)

            return True, "文件完整"
        except Exception as e:
            return False, f"文件损坏: {str(e)}"

    def detect_and_fix_encoding(self, file_path):
        """Decode *file_path* as text, trying several encodings in order.

        Returns:
            tuple[str, str]: (file content, encoding label used).

        Raises:
            Exception: if the file cannot be read at all.
        """
        try:
            # Try common strict encodings first. Bug fix: the original list
            # also included 'iso-8859-1'/'latin-1', which accept *any* byte
            # sequence, so the chardet fallback below was unreachable and
            # non-Latin files were silently mis-decoded.
            for encoding in ('utf-8', 'gbk', 'gb2312'):
                try:
                    with open(file_path, 'r', encoding=encoding) as f:
                        return f.read(), encoding
                except (UnicodeDecodeError, UnicodeError):
                    continue

            # Fall back to statistical detection.
            with open(file_path, 'rb') as f:
                raw_data = f.read()
            detected_encoding = chardet.detect(raw_data)['encoding']

            if detected_encoding:
                try:
                    return raw_data.decode(detected_encoding), detected_encoding
                except (UnicodeDecodeError, UnicodeError):
                    pass

            # Last resort: decode as UTF-8, dropping undecodable bytes.
            return raw_data.decode('utf-8', errors='ignore'), 'utf-8 (忽略错误)'

        except Exception as e:
            raise Exception(f"编码检测失败: {str(e)}")

    def parse_html(self, file_path):
        """Parse an HTML file into the standard result dict.

        Raises:
            Exception: if the file is damaged or cannot be parsed.
        """
        try:
            is_ok, message = self.check_file_integrity(file_path)
            if not is_ok:
                raise Exception(f"HTML文件损坏: {message}")

            content, encoding = self.detect_and_fix_encoding(file_path)
            soup = BeautifulSoup(content, 'html.parser')

            return {
                'text': self._extract_text(soup),
                'images': self._extract_images(soup, file_path),
                'tables': self._extract_tables(soup),
                'links': self._extract_links(soup),
                'metadata': {
                    'type': 'html',
                    'encoding': encoding,
                    'title': self._extract_title(soup)
                }
            }

        except Exception as e:
            raise Exception(f"HTML解析失败: {str(e)}")

    def parse_mhtml(self, file_path):
        """Parse an MHTML (MIME HTML) archive into the standard result dict."""
        try:
            is_ok, message = self.check_file_integrity(file_path)
            if not is_ok:
                raise Exception(f"MHTML文件损坏: {message}")

            result = {
                'text': '',
                'images': {},
                'tables': [],
                'links': [],
                'metadata': {
                    'type': 'mhtml',
                    'parts': []
                }
            }

            with open(file_path, 'rb') as f:
                msg = BytesParser(policy=policy.default).parse(f)

            # walk() recurses into nested multiparts and also handles a
            # non-multipart message; iter_parts() (used originally) yields
            # nothing for single-part files and does not recurse.
            for part in msg.walk():
                content_type = part.get_content_type()
                if content_type.startswith('multipart/'):
                    continue  # MIME container, not a real body part

                content_location = part.get('Content-Location', '')
                result['metadata']['parts'].append({
                    'content_type': content_type,
                    'content_location': content_location
                })

                if content_type == 'text/html':
                    try:
                        html_content = part.get_content()
                        soup = BeautifulSoup(html_content, 'html.parser')
                        result['text'] = self._extract_text(soup)
                        result['images'].update(self._extract_images_from_mhtml(part, msg))
                        result['tables'].extend(self._extract_tables(soup))
                        result['links'].extend(self._extract_links(soup))
                    except Exception as e:
                        print(f"处理HTML部分失败: {e}")
                        continue

                elif content_type.startswith('image/'):
                    try:
                        image_data = part.get_payload(decode=True)
                        if image_data:
                            image_name = content_location or f"image_{len(result['images']) + 1}"
                            # Bug fix: `io` was only imported inside the
                            # __main__ guard, so this line raised NameError
                            # whenever the module was imported; `io` is now
                            # a top-level import.
                            image = Image.open(io.BytesIO(image_data))
                            result['images'][image_name] = image
                    except Exception as e:
                        print(f"处理图片部分失败: {e}")
                        continue

            return result

        except Exception as e:
            raise Exception(f"MHTML解析失败: {str(e)}")

    def parse_txt(self, file_path):
        """Parse a plain-text file into the standard result dict."""
        try:
            is_ok, message = self.check_file_integrity(file_path)
            if not is_ok:
                raise Exception(f"TXT文件损坏: {message}")

            content, encoding = self.detect_and_fix_encoding(file_path)

            return {
                'text': content,
                'images': {},
                # Best-effort recognition of whitespace-aligned tables.
                'tables': self._extract_tables_from_text(content),
                'links': self._extract_links_from_text(content),
                'metadata': {
                    'type': 'txt',
                    'encoding': encoding
                }
            }

        except Exception as e:
            raise Exception(f"TXT解析失败: {str(e)}")

    def _extract_title(self, soup):
        """Return the <title> text, or '' when absent."""
        title_tag = soup.find('title')
        return title_tag.get_text().strip() if title_tag else ""

    def _extract_text(self, soup):
        """Return the visible text of *soup*, collapsed to single spaces."""
        # Drop non-visible content.
        for script in soup(["script", "style"]):
            script.decompose()

        text = soup.get_text()

        # Normalise whitespace: strip each line, split on double spaces,
        # then join all non-empty fragments with single spaces.
        lines = (line.strip() for line in text.splitlines())
        chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
        return ' '.join(chunk for chunk in chunks if chunk)

    def _extract_images(self, soup, base_path):
        """Extract embedded (base64 data-URI) images from an HTML document.

        Remote URLs and local relative paths are currently skipped;
        *base_path* is kept for future relative-path resolution.
        """
        images = {}

        for i, img_tag in enumerate(soup.find_all('img')):
            try:
                src = img_tag.get('src', '')
                if not src:
                    continue

                if src.startswith('data:image'):
                    # Inline base64-encoded image.
                    match = re.search(r'base64,(.*)', src)
                    if not match:
                        continue  # malformed data URI
                    image_bytes = base64.b64decode(match.group(1))
                    images[f'image_{i+1}'] = Image.open(io.BytesIO(image_bytes))
                elif not src.startswith(('http://', 'https://')):
                    # TODO: resolve relative paths against base_path.
                    pass

            except Exception as e:
                print(f"提取图片失败: {e}")
                continue

        return images

    def _extract_images_from_mhtml(self, html_part, message):
        """Resolve <img> references in *html_part* against *message*'s parts.

        A part matches when the <img> src ends with its Content-Location,
        or contains a ``cid:`` reference to its Content-ID.
        """
        images = {}

        try:
            html_content = html_part.get_payload(decode=True)
            if not html_content:
                return images

            soup = BeautifulSoup(html_content, 'html.parser')

            for img_tag in soup.find_all('img'):
                try:
                    src = img_tag.get('src', '')
                    if not src:
                        continue

                    # Find the MIME part this <img> refers to.
                    for part in message.walk():
                        content_location = part.get('Content-Location', '')
                        content_id = part.get('Content-ID', '').strip('<>')

                        if (content_location and src.endswith(content_location)) or \
                           (content_id and f"cid:{content_id}" in src):
                            image_data = part.get_payload(decode=True)
                            if image_data:
                                image = Image.open(io.BytesIO(image_data))
                                image_name = content_location or f"image_{len(images) + 1}"
                                images[image_name] = image
                                break

                except Exception as e:
                    print(f"提取MHTML图片失败: {e}")
                    continue

        except Exception as e:
            print(f"处理MHTML图片失败: {e}")

        return images

    def _extract_tables(self, soup):
        """Convert each <table> in *soup* to a Markdown table string."""
        tables = []

        for table in soup.find_all('table'):
            try:
                # pandas handles rowspan/colspan parsing. StringIO wrapper:
                # passing literal HTML to read_html is deprecated since
                # pandas 2.1.
                df_list = pd.read_html(io.StringIO(str(table)))
                if df_list:
                    tables.append(self._dataframe_to_markdown(df_list[0]))
            except Exception as e:
                print(f"提取表格失败: {e}")
                continue

        return tables

    def _extract_tables_from_text(self, text):
        """Recognise simple whitespace-aligned tables in plain text.

        A "table row" is a stripped line with at least two columns
        separated by a tab or two-plus spaces; two or more consecutive
        such rows form a table (first row treated as the header).
        """
        tables = []
        table_lines = []

        def flush():
            # Convert the collected candidate rows into one Markdown table.
            if not table_lines:
                return
            try:
                rows = [re.split(r'\s{2,}', line) for line in table_lines]
                if len(rows) >= 2:
                    header = '| ' + ' | '.join(rows[0]) + ' |'
                    separator = '| ' + ' | '.join(['---'] * len(rows[0])) + ' |'
                    body = ''.join('| ' + ' | '.join(row) + ' |\n' for row in rows[1:])
                    tables.append(f"{header}\n{separator}\n{body}")
            except Exception as e:
                print(f"转换文本表格失败: {e}")

        for line in text.split('\n'):
            stripped = line.strip()
            if re.match(r'^(.+?)(\s{2,}|\t)(.+?)$', stripped):
                table_lines.append(stripped)
            else:
                flush()
                table_lines = []

        # Bug fix: the original only flushed when it hit a non-table line,
        # dropping a table that ran to the end of the text.
        flush()

        return tables

    def _extract_links(self, soup):
        """Return [{'text', 'url'}] for every <a href> with visible text."""
        links = []
        for a_tag in soup.find_all('a', href=True):
            href = a_tag.get('href', '')
            text = a_tag.get_text(strip=True)
            if href and text:
                links.append({'text': text, 'url': href})
        return links

    def _extract_links_from_text(self, text):
        """Find bare http(s) URLs in plain text."""
        url_pattern = r'https?://[^\s<>\"{}|\\^`[\]]+'
        links = []
        for url in re.findall(url_pattern, text):
            # Truncate very long URLs for the display text.
            display_text = url[:50] + '...' if len(url) > 50 else url
            links.append({'text': display_text, 'url': url})
        return links

    def _dataframe_to_markdown(self, df):
        """Render a DataFrame as a GitHub-style Markdown table ('' if empty)."""
        if df.empty:
            return ""

        headers = '| ' + ' | '.join(map(str, df.columns)) + ' |'
        separator = '| ' + ' | '.join(['---'] * len(df.columns)) + ' |'
        rows = ['| ' + ' | '.join(map(str, row.values)) + ' |'
                for _, row in df.iterrows()]

        return '\n'.join([headers, separator] + rows)

    def to_markdown(self, parse_result):
        """Assemble a Markdown document from a parse-result dict."""
        markdown_parts = []

        if parse_result['metadata'].get('title'):
            markdown_parts.append(f"# {parse_result['metadata']['title']}\n")

        if parse_result['text']:
            markdown_parts.append("## 文本内容\n")
            markdown_parts.append(parse_result['text'] + "\n")

        if parse_result['tables']:
            markdown_parts.append("\n## 表格\n")
            for i, table in enumerate(parse_result['tables']):
                markdown_parts.append(f"### 表格 {i+1}\n")
                markdown_parts.append(table + "\n")

        if parse_result['links']:
            markdown_parts.append("\n## 超链接\n")
            for link in parse_result['links']:
                markdown_parts.append(f"- [{link['text']}]({link['url']})\n")

        if parse_result['images']:
            # Images are referenced by name only; saving the PIL objects
            # to disk is the caller's responsibility.
            markdown_parts.append("\n## 图片\n")
            for i, name in enumerate(parse_result['images']):
                markdown_parts.append(f"![图片 {i+1}]({name})\n")

        return ''.join(markdown_parts)

    def parse_document(self, file_path):
        """Dispatch on file extension, parse, and attach a 'markdown' key.

        Supported extensions: .html/.htm, .mhtml/.mht, .txt.

        Raises:
            ValueError: for unsupported extensions.
            Exception: propagated parse failures (after cleanup()).
        """
        file_ext = Path(file_path).suffix.lower()

        try:
            if file_ext in ('.html', '.htm'):
                result = self.parse_html(file_path)
            elif file_ext in ('.mhtml', '.mht'):
                result = self.parse_mhtml(file_path)
            elif file_ext == '.txt':
                result = self.parse_txt(file_path)
            else:
                raise ValueError(f"不支持的文件格式: {file_ext}")

            result['markdown'] = self.to_markdown(result)
            return result

        except Exception:
            self.cleanup()
            raise

# Usage example
def parse_text():
    """Usage example: run a DocumentParser and report any failure."""
    parser = DocumentParser()
    try:
        # Example invocations (uncomment to run against real files):
        #   result = parser.parse_document('example.html')
        #   result = parser.parse_document('example.txt')
        #   result = parser.parse_document('example.mhtml')
        #
        # Inspecting the result:
        #   print("Markdown content:")
        #   print(result['markdown'])
        #   print("\nImage count:", len(result['images']))
        #   print("Table count:", len(result['tables']))
        #   print("Link count:", len(result['links']))
        pass
    except Exception as e:
        print(f"解析失败: {e}")
    finally:
        parser.cleanup()


if __name__ == "__main__":
    # Bug fix: the original did `import io` here, which only bound the name
    # when the file was run as a script; `parse_mhtml` needs `io` even when
    # this module is imported, so the import now lives at the top of the file.
    parse_text()