import streamlit as st
import json
import csv
import zipfile
import pandas as pd
from typing import Dict, Any, List
from datetime import datetime
from io import BytesIO, StringIO
import os

def render_download_page():
    """Render the download page: tabs for quick export, custom export,
    batch download and format conversion of processing results."""

    st.header("📥 结果下载")
    st.markdown("导出处理结果为多种格式，适用于不同的向量数据库")

    # Bail out early when nothing has been processed yet.
    results = st.session_state.get('processing_results')
    if not results:
        st.info("📝 暂无处理结果，请先在主页面上传并处理文档。")
        return

    # One tab per export workflow; render each with its dedicated function.
    tabs = st.tabs(["🎯 快速导出", "⚙️ 自定义导出", "📊 批量下载", "🔧 格式转换"])
    renderers = (
        render_quick_export,
        render_custom_export,
        render_batch_download,
        render_format_conversion,
    )
    for tab, renderer in zip(tabs, renderers):
        with tab:
            renderer(results)

def render_quick_export(results: Dict[str, Any]):
    """Render the quick-export panel: one-click buttons for preset formats."""

    st.subheader("🎯 快速导出")
    st.markdown("选择预设格式，一键导出处理结果")

    left, right = st.columns(2)

    # Popular vector-DB formats on the left.
    with left:
        st.markdown("**🔥 热门格式**")
        if st.button("📄 Dify 格式 (JSON)", use_container_width=True):
            export_dify_format(results)
        if st.button("🌊 Ragflow 格式 (JSON)", use_container_width=True):
            export_ragflow_format(results)
        if st.button("📝 纯文本 (TXT)", use_container_width=True):
            export_plain_text(results)

    # Tabular / document formats on the right.
    with right:
        st.markdown("**📊 数据格式**")
        if st.button("📈 CSV 表格", use_container_width=True):
            export_csv_format(results)
        if st.button("📋 Excel 工作簿", use_container_width=True):
            export_excel_format(results)
        if st.button("🗂️ Markdown 文档", use_container_width=True):
            export_markdown_format(results)

    # Summary metrics below the buttons.
    st.markdown("---")
    render_export_statistics(results)

def render_custom_export(results: Dict[str, Any]):
    """Render the custom-export panel.

    Collects the user's export format, content toggles, file selection,
    chunk-size filters and output field names, then calls
    export_custom_format() with the assembled options dict.

    Args:
        results: processing results; expects a 'files' list where each entry
            may carry 'filename' and 'chunks'.
    """
    
    st.subheader("⚙️ 自定义导出")
    st.markdown("自定义导出格式和内容")
    
    # Export format selection.
    # NOTE(review): Markdown/XML/YAML are offered here but
    # export_custom_format() only implements JSON/CSV/TXT -- confirm.
    export_format = st.selectbox(
        "导出格式",
        ["JSON", "CSV", "TXT", "Markdown", "XML", "YAML"],
        help="选择导出的文件格式"
    )
    
    # Content toggles: which sections to include in the export.
    st.markdown("**📋 导出内容**")
    
    col1, col2 = st.columns(2)
    
    with col1:
        include_metadata = st.checkbox("包含元数据", value=True)
        include_original = st.checkbox("包含原始内容", value=False)
        include_cleaned = st.checkbox("包含清洗后内容", value=True)
    
    with col2:
        include_chunks = st.checkbox("包含切片数据", value=True)
        include_statistics = st.checkbox("包含统计信息", value=True)
        include_structure = st.checkbox("包含结构信息", value=False)
    
    # File selection: all files are pre-selected by default.
    files = results.get('files', [])
    if files:
        st.markdown("**📁 文件选择**")
        
        file_options = [f"{i+1}. {file_result.get('filename', f'文件{i+1}')}" for i, file_result in enumerate(files)]
        selected_files = st.multiselect(
            "选择要导出的文件",
            range(len(file_options)),
            default=list(range(len(file_options))),
            format_func=lambda x: file_options[x]
        )
    else:
        selected_files = []
    
    # Advanced options: size filters and output field names.
    with st.expander("🔧 高级选项"):
        # Chunk filtering by character count (0 disables the bound).
        st.markdown("**切片过滤**")
        
        col1, col2 = st.columns(2)
        with col1:
            min_chunk_size = st.number_input("最小切片大小", min_value=0, value=0, help="过滤掉小于指定字符数的切片")
        with col2:
            max_chunk_size = st.number_input("最大切片大小", min_value=0, value=0, help="过滤掉大于指定字符数的切片，0表示无限制")
        
        # Output field-name mapping for the generated records.
        st.markdown("**字段映射**")
        
        col1, col2 = st.columns(2)
        with col1:
            content_field = st.text_input("内容字段名", value="content", help="切片内容的字段名")
            id_field = st.text_input("ID字段名", value="id", help="切片ID的字段名")
        with col2:
            metadata_field = st.text_input("元数据字段名", value="metadata", help="元数据的字段名")
            source_field = st.text_input("来源字段名", value="source", help="来源文件的字段名")
    
    # Export trigger: assemble the options dict and delegate.
    if st.button("🚀 开始自定义导出", type="primary", use_container_width=True):
        if selected_files:
            export_options = {
                'format': export_format,
                'include_metadata': include_metadata,
                'include_original': include_original,
                'include_cleaned': include_cleaned,
                'include_chunks': include_chunks,
                'include_statistics': include_statistics,
                'include_structure': include_structure,
                'selected_files': selected_files,
                'min_chunk_size': min_chunk_size,
                'max_chunk_size': max_chunk_size,
                'field_mapping': {
                    'content': content_field,
                    'id': id_field,
                    'metadata': metadata_field,
                    'source': source_field
                }
            }
            
            export_custom_format(results, export_options)
        else:
            st.warning("请至少选择一个文件进行导出")

def render_batch_download(results: Dict[str, Any]):
    """Render the batch-download panel: export several formats at once,
    optionally bundled into a ZIP archive."""

    st.subheader("📊 批量下载")
    st.markdown("批量导出多种格式，打包下载")

    st.markdown("**📦 批量导出选项**")

    # Which export formats to generate in one go.
    chosen_formats = st.multiselect(
        "选择要导出的格式",
        ["Dify JSON", "Ragflow JSON", "CSV", "TXT", "Markdown", "Excel"],
        default=["Dify JSON", "CSV", "TXT"]
    )

    # How to organize the output files.
    layout = st.radio(
        "文件组织方式",
        ["按格式分组", "按源文件分组", "扁平结构"],
        help="选择导出文件的组织方式"
    )

    # ZIP archive or individual files.
    archive = st.selectbox(
        "压缩格式",
        ["ZIP", "无压缩"],
        help="选择文件压缩格式"
    )

    if st.button("📦 开始批量导出", type="primary", use_container_width=True):
        if not chosen_formats:
            st.warning("请至少选择一种导出格式")
        else:
            create_batch_download(results, {
                'formats': chosen_formats,
                'organization': layout,
                'compression': archive
            })

def render_format_conversion(results: Dict[str, Any]):
    """Render the format-conversion panel.

    Lets the user pick a source and target vector-DB format, shows the
    field mapping between them, and triggers convert_format().
    """
    
    st.subheader("🔧 格式转换")
    st.markdown("在不同向量数据库格式之间转换")
    
    # Source/target format pickers side by side.
    col1, col2 = st.columns(2)
    
    with col1:
        source_format = st.selectbox(
            "源格式",
            ["Dify", "Ragflow", "LangChain", "通用JSON", "CSV"],
            help="当前数据的格式"
        )
    
    with col2:
        target_format = st.selectbox(
            "目标格式",
            ["Dify", "Ragflow", "LangChain", "通用JSON", "CSV"],
            help="要转换到的格式"
        )
    
    # Reference notes describing each supported format.
    with st.expander("📖 格式说明"):
        st.markdown("""
        **Dify格式**: 适用于Dify平台的向量数据库导入
        - 字段: `content`, `metadata`, `source`
        - 特点: 支持丰富的元数据结构
        
        **Ragflow格式**: 适用于Ragflow平台的向量数据库导入
        - 字段: `text`, `meta`, `doc_id`
        - 特点: 注重文档层级结构
        
        **LangChain格式**: 适用于LangChain框架
        - 字段: `page_content`, `metadata`
        - 特点: 简洁的文档表示
        
        **通用JSON**: 标准的JSON格式
        - 字段: 可自定义
        - 特点: 兼容性好，易于处理
        
        **CSV格式**: 表格格式
        - 字段: 列标题可自定义
        - 特点: 易于在Excel等工具中查看
        """)
    
    # Conversion mapping preview + trigger (only when formats differ).
    if source_format != target_format:
        st.markdown("**🔄 字段映射**")
        
        # Show the derived field-mapping relationship.
        mapping_info = get_format_mapping(source_format, target_format)
        
        for source_field, target_field in mapping_info.items():
            st.markdown(f"• `{source_field}` → `{target_field}`")
        
        # Conversion trigger.
        if st.button("🔄 开始格式转换", type="primary", use_container_width=True):
            convert_format(results, source_format, target_format)
    else:
        st.info("源格式和目标格式相同，无需转换")

def export_dify_format(results: Dict[str, Any]):
    """Build a Dify-compatible JSON export and offer it for download."""

    try:
        items: List[Dict[str, Any]] = []

        for file_result in results.get('files', []):
            filename = file_result.get('filename', '未知文件')

            for idx, chunk in enumerate(file_result.get('chunks', [])):
                # Per-chunk metadata: processing info first, then the chunk's
                # own metadata (which wins on key collisions).
                meta = {
                    'source': filename,
                    'chunk_id': chunk.get('id', f'chunk_{idx}'),
                    'chunk_index': idx,
                    'file_format': file_result.get('format', ''),
                    'processing_time': file_result.get('processing_time', 0),
                }
                meta.update(chunk.get('metadata', {}))
                items.append({
                    'content': chunk.get('content', ''),
                    'metadata': meta,
                    'source': filename,
                })

        payload = json.dumps(items, ensure_ascii=False, indent=2)

        st.download_button(
            label="📄 下载 Dify 格式文件",
            data=payload,
            file_name=f"dify_export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
            mime="application/json"
        )
        st.success(f"✅ 已生成 Dify 格式文件，包含 {len(items)} 个切片")

    except Exception as e:
        st.error(f"❌ Dify 格式导出失败: {str(e)}")

def export_ragflow_format(results: Dict[str, Any]):
    """Build a Ragflow-compatible JSON export and offer it for download."""

    try:
        items: List[Dict[str, Any]] = []

        for file_result in results.get('files', []):
            filename = file_result.get('filename', '未知文件')
            # One doc_id per source file, derived from the filename hash.
            doc_id = f"doc_{hash(filename) % 1000000}"

            for idx, chunk in enumerate(file_result.get('chunks', [])):
                chunk_meta = chunk.get('metadata', {})
                meta = {
                    'doc_name': filename,
                    'chunk_id': chunk.get('id', f'chunk_{idx}'),
                    'page_num': chunk_meta.get('page', idx + 1),
                    'file_type': file_result.get('format', ''),
                }
                # Chunk metadata wins on key collisions, as before.
                meta.update(chunk_meta)
                items.append({
                    'text': chunk.get('content', ''),
                    'meta': meta,
                    'doc_id': doc_id,
                })

        payload = json.dumps(items, ensure_ascii=False, indent=2)

        st.download_button(
            label="🌊 下载 Ragflow 格式文件",
            data=payload,
            file_name=f"ragflow_export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
            mime="application/json"
        )
        st.success(f"✅ 已生成 Ragflow 格式文件，包含 {len(items)} 个切片")

    except Exception as e:
        st.error(f"❌ Ragflow 格式导出失败: {str(e)}")

def export_plain_text(results: Dict[str, Any]):
    """Export all chunks as a plain-text file and offer it for download.

    Each source file gets a `=== <filename> ===` header followed by its
    chunks, separated by `--- 切片 N ---` markers.
    """
    
    try:
        text_content = []
        
        for file_result in results.get('files', []):
            filename = file_result.get('filename', '未知文件')
            # Bug fix: the header previously printed a literal placeholder
            # string instead of the actual filename (which was assigned but
            # never used).
            text_content.append(f"=== {filename} ===")
            text_content.append("")
            
            for i, chunk in enumerate(file_result.get('chunks', [])):
                text_content.append(f"--- 切片 {i+1} ---")
                text_content.append(chunk.get('content', ''))
                text_content.append("")
        
        full_text = "\n".join(text_content)
        
        st.download_button(
            label="📝 下载纯文本文件",
            data=full_text,
            file_name=f"text_export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.txt",
            mime="text/plain"
        )
        
        total_chunks = sum(len(f.get('chunks', [])) for f in results.get('files', []))
        st.success(f"✅ 已生成纯文本文件，包含 {total_chunks} 个切片")
        
    except Exception as e:
        st.error(f"❌ 纯文本导出失败: {str(e)}")

def export_csv_format(results: Dict[str, Any]):
    """Build a CSV export (one row per chunk) and offer it for download."""

    try:
        rows = []

        for file_result in results.get('files', []):
            filename = file_result.get('filename', '未知文件')

            for idx, chunk in enumerate(file_result.get('chunks', [])):
                text = chunk.get('content', '')
                rows.append({
                    'id': chunk.get('id', f'chunk_{idx}'),
                    'content': text,
                    'source_file': filename,
                    'chunk_index': idx,
                    'file_format': file_result.get('format', ''),
                    'character_count': len(text),
                    'chunk_type': chunk.get('type', ''),
                    'metadata': json.dumps(chunk.get('metadata', {}), ensure_ascii=False)
                })

        # Render via pandas so quoting/escaping is handled for us.
        buffer = StringIO()
        pd.DataFrame(rows).to_csv(buffer, index=False, encoding='utf-8-sig')

        st.download_button(
            label="📈 下载 CSV 文件",
            data=buffer.getvalue(),
            file_name=f"csv_export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv",
            mime="text/csv"
        )
        st.success(f"✅ 已生成 CSV 文件，包含 {len(rows)} 行数据")

    except Exception as e:
        st.error(f"❌ CSV 导出失败: {str(e)}")

def export_excel_format(results: Dict[str, Any]):
    """Export chunks and per-file statistics as an Excel workbook.

    Creates two sheets — '切片数据' (one row per chunk) and '统计信息'
    (one row per source file) — and offers the workbook for download.
    """
    
    try:
        excel_buffer = BytesIO()
        
        with pd.ExcelWriter(excel_buffer, engine='openpyxl') as writer:
            # Sheet 1: one row per chunk.
            chunks_data = []
            for file_result in results.get('files', []):
                filename = file_result.get('filename', '未知文件')
                
                for i, chunk in enumerate(file_result.get('chunks', [])):
                    chunks_data.append({
                        'ID': chunk.get('id', f'chunk_{i}'),
                        '内容': chunk.get('content', ''),
                        '来源文件': filename,
                        '切片索引': i,
                        '文件格式': file_result.get('format', ''),
                        '字符数': len(chunk.get('content', '')),
                        '切片类型': chunk.get('type', ''),
                        '元数据': json.dumps(chunk.get('metadata', {}), ensure_ascii=False)
                    })
            
            if chunks_data:
                pd.DataFrame(chunks_data).to_excel(writer, sheet_name='切片数据', index=False)
            
            # Sheet 2: one row per source file.
            stats_data = []
            for file_result in results.get('files', []):
                stats = file_result.get('stats', {})
                stats_data.append({
                    '文件名': file_result.get('filename', '未知文件'),
                    '格式': file_result.get('format', ''),
                    '原始字符数': stats.get('original_characters', 0),
                    '处理后字符数': stats.get('total_characters', 0),
                    '切片数量': len(file_result.get('chunks', [])),
                    '处理时间': file_result.get('processing_time', 0)
                })
            
            if stats_data:
                pd.DataFrame(stats_data).to_excel(writer, sheet_name='统计信息', index=False)
        
        excel_data = excel_buffer.getvalue()
        
        st.download_button(
            label="📋 下载 Excel 文件",
            data=excel_data,
            file_name=f"excel_export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.xlsx",
            mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        )
        
        # `chunks_data` is always bound at this point; the previous
        # `'chunks_data' in locals()` guard was dead code.
        st.success(f"✅ 已生成 Excel 文件，包含 {len(chunks_data)} 个切片")
        
    except Exception as e:
        st.error(f"❌ Excel 导出失败: {str(e)}")

def export_markdown_format(results: Dict[str, Any]):
    """Export results as a Markdown report and offer it for download.

    The report contains a generation timestamp, overall statistics, then a
    section per source file with its stats and fenced chunk contents.
    """
    
    try:
        markdown_content = []
        
        # Document header with generation timestamp.
        markdown_content.append("# 文档处理结果")
        markdown_content.append("")
        markdown_content.append(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        markdown_content.append("")
        
        # Overall statistics.
        total_files = len(results.get('files', []))
        total_chunks = sum(len(f.get('chunks', [])) for f in results.get('files', []))
        
        markdown_content.append("## 📊 处理统计")
        markdown_content.append("")
        markdown_content.append(f"- **处理文件数**: {total_files}")
        markdown_content.append(f"- **总切片数**: {total_chunks}")
        markdown_content.append("")
        
        # One section per source file.
        for file_result in results.get('files', []):
            filename = file_result.get('filename', '未知文件')
            # Bug fix: the heading previously printed a literal placeholder
            # string instead of the actual filename.
            markdown_content.append(f"## 📄 {filename}")
            markdown_content.append("")
            
            stats = file_result.get('stats', {})
            markdown_content.append(f"- **格式**: {file_result.get('format', '未知')}")
            markdown_content.append(f"- **字符数**: {stats.get('total_characters', 0):,}")
            markdown_content.append(f"- **切片数**: {len(file_result.get('chunks', []))}")
            markdown_content.append("")
            
            # Each chunk in a fenced code block.
            for i, chunk in enumerate(file_result.get('chunks', [])):
                markdown_content.append(f"### 切片 {i+1}")
                markdown_content.append("")
                markdown_content.append("```")
                markdown_content.append(chunk.get('content', ''))
                markdown_content.append("```")
                markdown_content.append("")
        
        full_markdown = "\n".join(markdown_content)
        
        st.download_button(
            label="🗂️ 下载 Markdown 文件",
            data=full_markdown,
            file_name=f"markdown_export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md",
            mime="text/markdown"
        )
        
        st.success(f"✅ 已生成 Markdown 文件，包含 {total_chunks} 个切片")
        
    except Exception as e:
        st.error(f"❌ Markdown 导出失败: {str(e)}")

def export_custom_format(results: Dict[str, Any], options: Dict[str, Any]):
    """Export selected files' chunks using user-chosen format and field names.

    `options` keys used here:
        format             -- 'JSON' | 'CSV' | 'TXT' (anything else is rejected)
        selected_files     -- indices into results['files']
        min/max_chunk_size -- character-count filters (0 = no limit)
        field_mapping      -- output field names for id/content/metadata/source
        include_metadata   -- whether to attach chunk metadata
    """
    
    try:
        files = results.get('files', [])
        selected_files = options.get('selected_files', [])
        
        # Hoist the field mapping out of the loop and remember the mapped key
        # names. Bug fix: the TXT branch below previously hard-coded
        # 'id'/'content' lookups, producing blank lines whenever the user
        # supplied custom field names.
        field_mapping = options.get('field_mapping', {})
        id_key = field_mapping.get('id', 'id')
        content_key = field_mapping.get('content', 'content')
        source_key = field_mapping.get('source', 'source')
        metadata_key = field_mapping.get('metadata', 'metadata')
        
        min_size = options.get('min_chunk_size', 0)
        max_size = options.get('max_chunk_size', 0)
        
        filtered_data = []
        for file_idx in selected_files:
            if file_idx >= len(files):
                continue
            file_result = files[file_idx]
            
            for chunk in file_result.get('chunks', []):
                content = chunk.get('content', '')
                
                # Size filters; 0 means "no limit".
                if min_size > 0 and len(content) < min_size:
                    continue
                if max_size > 0 and len(content) > max_size:
                    continue
                
                data_item = {
                    id_key: chunk.get('id', ''),
                    content_key: content,
                    source_key: file_result.get('filename', '')
                }
                if options.get('include_metadata'):
                    data_item[metadata_key] = chunk.get('metadata', {})
                
                filtered_data.append(data_item)
        
        export_format = options.get('format', 'JSON')
        
        if export_format == 'JSON':
            content = json.dumps(filtered_data, ensure_ascii=False, indent=2)
            mime_type = "application/json"
            file_ext = "json"
        elif export_format == 'CSV':
            csv_buffer = StringIO()
            pd.DataFrame(filtered_data).to_csv(csv_buffer, index=False, encoding='utf-8-sig')
            content = csv_buffer.getvalue()
            mime_type = "text/csv"
            file_ext = "csv"
        elif export_format == 'TXT':
            text_lines = []
            for item in filtered_data:
                # Read back through the same (possibly remapped) keys we wrote.
                text_lines.append(f"ID: {item.get(id_key, '')}")
                text_lines.append(f"Content: {item.get(content_key, '')}")
                text_lines.append("---")
            content = "\n".join(text_lines)
            mime_type = "text/plain"
            file_ext = "txt"
        else:
            # The UI also offers Markdown/XML/YAML, which are not implemented here.
            st.error(f"不支持的导出格式: {export_format}")
            return
        
        st.download_button(
            label=f"📥 下载自定义 {export_format} 文件",
            data=content,
            file_name=f"custom_export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.{file_ext}",
            mime=mime_type
        )
        
        st.success(f"✅ 已生成自定义 {export_format} 文件，包含 {len(filtered_data)} 个切片")
        
    except Exception as e:
        st.error(f"❌ 自定义导出失败: {str(e)}")

def create_batch_download(results: Dict[str, Any], options: Dict[str, Any]):
    """Generate the selected export formats and serve them as a ZIP archive
    or as individual download buttons.

    `options` keys: formats (list of format labels), compression ('ZIP' or
    '无压缩'), organization (accepted but not applied to the output layout
    here -- TODO confirm intended behavior).
    """
    
    try:
        files_to_zip: Dict[str, Any] = {}
        
        # Map each supported format label to its output filename and generator.
        generators = {
            "Dify JSON": ("dify_format.json", generate_dify_content),
            "Ragflow JSON": ("ragflow_format.json", generate_ragflow_content),
            "CSV": ("data_export.csv", generate_csv_content),
            "TXT": ("text_export.txt", generate_txt_content),
            "Markdown": ("markdown_export.md", generate_markdown_content),
            "Excel": ("excel_export.xlsx", generate_excel_content),
        }
        
        for format_name in options.get('formats', []):
            if format_name in generators:
                out_name, generator = generators[format_name]
                files_to_zip[out_name] = generator(results)
        
        if options.get('compression') == 'ZIP':
            zip_buffer = BytesIO()
            
            with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
                for filename, content in files_to_zip.items():
                    # Text exports are encoded; Excel content is already bytes.
                    if isinstance(content, str):
                        zip_file.writestr(filename, content.encode('utf-8'))
                    else:
                        zip_file.writestr(filename, content)
            
            st.download_button(
                label="📦 下载批量导出压缩包",
                data=zip_buffer.getvalue(),
                file_name=f"batch_export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.zip",
                mime="application/zip"
            )
        else:
            # No compression: one download button per generated file.
            st.markdown("**📁 批量下载文件**")
            
            for filename, content in files_to_zip.items():
                st.download_button(
                    # Bug fix: the label and key previously contained a
                    # literal placeholder instead of the filename, so every
                    # button shared the same Streamlit key (duplicate-key
                    # error) and an unhelpful label.
                    label=f"📥 {filename}",
                    data=content if isinstance(content, (str, bytes)) else str(content),
                    file_name=filename,
                    mime=get_mime_type(filename),
                    key=f"download_{filename}"
                )
        
        st.success(f"✅ 已生成 {len(files_to_zip)} 个文件的批量导出")
        
    except Exception as e:
        st.error(f"❌ 批量导出失败: {str(e)}")

def render_export_statistics(results: Dict[str, Any]):
    """Show summary metrics (file/chunk counts, character totals, average
    chunk size) for the current processing results."""

    st.markdown("**📈 导出统计**")

    files = results.get('files', [])
    total_files = len(files)
    total_chunks = sum(len(f.get('chunks', [])) for f in files)
    total_chars = sum(f.get('stats', {}).get('total_characters', 0) for f in files)
    # Guard against division by zero when there are no chunks yet.
    avg_chunk_size = total_chars / total_chunks if total_chunks > 0 else 0

    columns = st.columns(4)
    metrics = [
        ("可导出文件", total_files),
        ("可导出切片", total_chunks),
        ("总字符数", f"{total_chars:,}"),
        ("平均切片大小", f"{avg_chunk_size:.0f}"),
    ]
    for column, (label, value) in zip(columns, metrics):
        with column:
            st.metric(label, value)

def get_format_mapping(source_format: str, target_format: str) -> Dict[str, str]:
    """Return a field-name mapping from *source_format* to *target_format*.

    A source field maps to an identically named target field when one
    exists; otherwise a content-like or metadata-like counterpart is found
    by substring match. Unmatched source fields are dropped.
    """

    format_fields = {
        'Dify': {'content': 'content', 'metadata': 'metadata', 'source': 'source'},
        'Ragflow': {'text': 'text', 'meta': 'meta', 'doc_id': 'doc_id'},
        'LangChain': {'page_content': 'page_content', 'metadata': 'metadata'},
        '通用JSON': {'content': 'content', 'metadata': 'metadata', 'id': 'id'},
        'CSV': {'content': 'content', 'metadata': 'metadata', 'source': 'source'}
    }

    source_keys = list(format_fields.get(source_format, {}))
    target_keys = list(format_fields.get(target_format, {}))

    def _first_target_with(token: str):
        # First target field whose name contains *token* (case-insensitive).
        return next((k for k in target_keys if token in k.lower()), None)

    mapping: Dict[str, str] = {}
    for key in source_keys:
        lowered = key.lower()
        if key in target_keys:
            mapping[key] = key
        elif 'content' in lowered and _first_target_with('content') is not None:
            mapping[key] = _first_target_with('content')
        elif 'metadata' in lowered and _first_target_with('meta') is not None:
            mapping[key] = _first_target_with('meta')

    return mapping

def convert_format(results: Dict[str, Any], source_format: str, target_format: str):
    """Convert all chunks to *target_format*'s field layout and offer the
    resulting JSON for download."""

    try:
        # Field mapping is computed as before; the conversion below builds
        # each item directly per target format.
        mapping = get_format_mapping(source_format, target_format)

        converted_data = []

        for file_result in results.get('files', []):
            source_name = file_result.get('filename', '')

            for chunk in file_result.get('chunks', []):
                content = chunk.get('content', '')
                metadata = chunk.get('metadata', {})

                if target_format == 'Dify':
                    item = {'content': content, 'metadata': metadata, 'source': source_name}
                elif target_format == 'Ragflow':
                    # doc_id derived from the filename hash, matching the
                    # other Ragflow exports in this module.
                    item = {
                        'text': content,
                        'meta': metadata,
                        'doc_id': f"doc_{hash(source_name) % 1000000}"
                    }
                elif target_format == 'LangChain':
                    item = {'page_content': content, 'metadata': metadata}
                else:
                    # Generic layout for 通用JSON / CSV targets.
                    item = {
                        'content': content,
                        'metadata': metadata,
                        'id': chunk.get('id', ''),
                        'source': source_name
                    }

                converted_data.append(item)

        json_str = json.dumps(converted_data, ensure_ascii=False, indent=2)

        st.download_button(
            label=f"🔄 下载转换后的 {target_format} 格式文件",
            data=json_str,
            file_name=f"{target_format.lower()}_converted_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
            mime="application/json"
        )

        st.success(f"✅ 已完成从 {source_format} 到 {target_format} 的格式转换，包含 {len(converted_data)} 个切片")

    except Exception as e:
        st.error(f"❌ 格式转换失败: {str(e)}")

# 辅助函数
def generate_dify_content(results: Dict[str, Any]) -> str:
    """Return the processing results serialized as Dify-style JSON.

    Each chunk becomes an object with `content`, `metadata` and `source`
    (the originating filename).
    """
    # Comprehension replaces the manual loop; the old enumerate index was unused.
    dify_data = [
        {
            'content': chunk.get('content', ''),
            'metadata': chunk.get('metadata', {}),
            'source': file_result.get('filename', '')
        }
        for file_result in results.get('files', [])
        for chunk in file_result.get('chunks', [])
    ]
    return json.dumps(dify_data, ensure_ascii=False, indent=2)

def generate_ragflow_content(results: Dict[str, Any]) -> str:
    """Return the processing results serialized as Ragflow-style JSON.

    Each chunk becomes an object with `text`, `meta` and a per-file `doc_id`.

    NOTE(review): `hash()` on strings is randomized per process, so doc_ids
    are not stable across runs -- confirm whether stable IDs are required.
    """
    ragflow_data = []
    for file_result in results.get('files', []):
        doc_id = f"doc_{hash(file_result.get('filename', '')) % 1000000}"
        # Plain iteration; the previous enumerate index was unused.
        for chunk in file_result.get('chunks', []):
            ragflow_data.append({
                'text': chunk.get('content', ''),
                'meta': chunk.get('metadata', {}),
                'doc_id': doc_id
            })
    return json.dumps(ragflow_data, ensure_ascii=False, indent=2)

def generate_csv_content(results: Dict[str, Any]) -> str:
    """Return the processing results rendered as CSV text (one row per chunk)."""
    rows = []
    for file_result in results.get('files', []):
        source_name = file_result.get('filename', '')
        for index, chunk in enumerate(file_result.get('chunks', [])):
            rows.append({
                'id': chunk.get('id', f'chunk_{index}'),
                'content': chunk.get('content', ''),
                'source_file': source_name,
                'metadata': json.dumps(chunk.get('metadata', {}), ensure_ascii=False)
            })

    buffer = StringIO()
    pd.DataFrame(rows).to_csv(buffer, index=False, encoding='utf-8-sig')
    return buffer.getvalue()

def generate_txt_content(results: Dict[str, Any]) -> str:
    """Return the processing results as plain text, one header per file and
    a separator line before each chunk."""
    lines: List[str] = []
    for file_result in results.get('files', []):
        lines.append(f"=== {file_result.get('filename', '未知文件')} ===")
        for index, chunk in enumerate(file_result.get('chunks', []), start=1):
            lines.append(f"--- 切片 {index} ---")
            lines.append(chunk.get('content', ''))
            lines.append("")
    return "\n".join(lines)

def generate_markdown_content(results: Dict[str, Any]) -> str:
    """Return the processing results as a Markdown document: a top heading,
    one section per file, and a fenced block per chunk."""
    parts = ["# 文档处理结果", ""]
    for file_result in results.get('files', []):
        parts.append(f"## {file_result.get('filename', '未知文件')}")
        for index, chunk in enumerate(file_result.get('chunks', []), start=1):
            parts.extend([
                f"### 切片 {index}",
                "```",
                chunk.get('content', ''),
                "```",
                "",
            ])
    return "\n".join(parts)

def generate_excel_content(results: Dict[str, Any]) -> bytes:
    """Return the processing results as an .xlsx workbook (raw bytes),
    one row per chunk."""
    rows = []
    for file_result in results.get('files', []):
        source_name = file_result.get('filename', '')
        for index, chunk in enumerate(file_result.get('chunks', [])):
            rows.append({
                'ID': chunk.get('id', f'chunk_{index}'),
                '内容': chunk.get('content', ''),
                '来源文件': source_name,
                '元数据': json.dumps(chunk.get('metadata', {}), ensure_ascii=False)
            })

    buffer = BytesIO()
    # NOTE: xlsx output requires the optional `openpyxl` dependency.
    pd.DataFrame(rows).to_excel(buffer, index=False, engine='openpyxl')
    return buffer.getvalue()

def get_mime_type(filename: str) -> str:
    """Return the MIME type for *filename* based on its extension, defaulting
    to application/octet-stream for unknown extensions."""
    known_types = {
        'json': 'application/json',
        'csv': 'text/csv',
        'txt': 'text/plain',
        'md': 'text/markdown',
        'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    }
    # Only the text after the last dot matters (case-insensitive).
    extension = filename.rsplit('.', 1)[-1].lower()
    return known_types.get(extension, 'application/octet-stream')