import streamlit as st
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from typing import Dict, Any, List, Optional
import json

def render_preview_page():
    """Render the processing-result preview page.

    Reads processed documents from ``st.session_state.processed_documents``,
    adapts them to the shape the tab renderers expect, and lays out four
    tabs: statistics, document content, chunk preview, quality analysis.
    """

    st.header("👀 处理结果预览")

    # Results live in memory only; bail out with a hint when nothing exists.
    processed_docs = st.session_state.get('processed_documents', [])
    if not processed_docs:
        st.info("📝 暂无处理结果，请先在主页面上传并处理文档。")
        return

    # Adapt the stored list into the dict shape the renderers consume.
    results = {
        'files': processed_docs,
        'processing_time': sum(doc.get('processing_time', 0) for doc in processed_docs),
        'total_files': len(processed_docs),
    }

    # Tell the user where the data lives.
    st.info(
        f"💾 **数据存储位置**: 内存中（session_state.processed_documents），共 {len(processed_docs)} 个文件的处理结果"
    )

    # Export buttons appear above the tabs.
    render_export_section(processed_docs)

    overview_tab, content_tab, chunks_tab, quality_tab = st.tabs(
        ["📊 统计概览", "📄 文档内容", "✂️ 切片预览", "📈 质量分析"]
    )
    with overview_tab:
        render_statistics_overview(results)
    with content_tab:
        render_document_content(results)
    with chunks_tab:
        render_chunks_preview(results)
    with quality_tab:
        render_quality_analysis(results)

def render_export_section(processed_docs: List[Dict[str, Any]]):
    """Render the data-export area (JSON / TXT / CSV downloads) plus summary metrics.

    Args:
        processed_docs: per-file processing results; each dict may carry
            'metadata' (with 'title'), 'chunks', and per-chunk 'content'/'type'.

    Bug fix: the original wrapped each ``st.download_button`` inside an
    ``st.button`` click handler.  Streamlit reruns the whole script on every
    widget interaction, so the download button only appeared after a first
    click and disappeared again on the next rerun — two clicks were needed
    and the download was easily lost.  The export payloads are now built on
    every render and handed directly to ``st.download_button``.
    """

    st.markdown("---")
    st.subheader("📥 数据导出")

    col1, col2, col3 = st.columns(3)

    with col1:
        st.markdown("**📄 JSON格式**")
        st.caption("完整的结构化数据，包含所有元数据")
        json_data = json.dumps(processed_docs, ensure_ascii=False, indent=2)
        st.download_button(
            label="💾 保存 JSON 文件",
            data=json_data,
            file_name=f"processed_documents_{len(processed_docs)}files.json",
            mime="application/json",
            key="download_json"
        )

    with col2:
        st.markdown("**📝 TXT格式**")
        st.caption("纯文本格式，仅包含切片内容")
        # Assemble with join instead of quadratic string concatenation.
        parts = []
        for doc in processed_docs:
            parts.append(f"=== {doc.get('metadata', {}).get('title', '未知文件')} ===\n\n")
            for i, chunk in enumerate(doc.get('chunks', []), 1):
                parts.append(f"[切片 {i}]\n{chunk.get('content', '')}\n\n")
            parts.append("\n" + "="*50 + "\n\n")
        st.download_button(
            label="💾 保存 TXT 文件",
            data="".join(parts),
            file_name=f"processed_documents_{len(processed_docs)}files.txt",
            mime="text/plain",
            key="download_txt"
        )

    with col3:
        st.markdown("**📊 CSV格式**")
        st.caption("表格格式，便于数据分析")
        csv_data = []
        for doc in processed_docs:
            filename = doc.get('metadata', {}).get('title', '未知文件')
            for i, chunk in enumerate(doc.get('chunks', []), 1):
                content = chunk.get('content', '')
                csv_data.append({
                    '文件名': filename,
                    '切片编号': i,
                    '切片类型': chunk.get('type', '未知'),
                    '字符数': len(content),
                    '内容': content[:100] + '...' if len(content) > 100 else content,
                    '完整内容': content
                })
        if csv_data:
            csv_string = pd.DataFrame(csv_data).to_csv(index=False, encoding='utf-8-sig')
            st.download_button(
                label="💾 保存 CSV 文件",
                data=csv_string,
                file_name=f"processed_documents_{len(processed_docs)}files.csv",
                mime="text/csv",
                key="download_csv"
            )

    # Summary metrics over everything that would be exported.
    st.markdown("---")
    total_chunks = sum(len(doc.get('chunks', [])) for doc in processed_docs)
    total_chars = sum(
        sum(len(chunk.get('content', '')) for chunk in doc.get('chunks', []))
        for doc in processed_docs
    )

    m1, m2, m3, m4 = st.columns(4)
    with m1:
        st.metric("处理文件数", len(processed_docs))
    with m2:
        st.metric("总切片数", total_chunks)
    with m3:
        st.metric("总字符数", f"{total_chars:,}")
    with m4:
        avg_size = total_chars / total_chunks if total_chunks > 0 else 0
        st.metric("平均切片大小", f"{avg_size:.0f}")

def render_statistics_overview(results: Dict[str, Any]):
    """Render the statistics tab: headline metrics, per-file table, then charts."""

    st.subheader("📊 处理统计")

    files = results.get('files', [])

    # Compute the aggregates once, up front.
    total_chunks = sum(len(f.get('chunks', [])) for f in files)
    total_chars = sum(f.get('stats', {}).get('total_characters', 0) for f in files)
    avg_chunk_size = total_chars / total_chunks if total_chunks > 0 else 0

    col1, col2, col3, col4 = st.columns(4)
    with col1:
        st.metric("处理文件数", len(files), help="成功处理的文件数量")
    with col2:
        st.metric("总切片数", total_chunks, help="生成的文档切片总数")
    with col3:
        st.metric("总字符数", f"{total_chars:,}", help="处理后的总字符数")
    with col4:
        st.metric("平均切片大小", f"{avg_chunk_size:.0f}", help="平均每个切片的字符数")

    # Per-file breakdown table.
    st.markdown("---")
    st.subheader("📁 文件处理详情")

    rows = []
    for file_result in files:
        stats = file_result.get('stats', {})
        original = stats.get('original_characters', 0)
        cleaned = stats.get('total_characters', 0)
        rows.append({
            '文件名': file_result.get('metadata', {}).get('title', file_result.get('filename', '未知')),
            '格式': file_result.get('format', '未知'),
            '原始字符数': original,
            '处理后字符数': cleaned,
            '切片数量': len(file_result.get('chunks', [])),
            # Guard against zero/missing original length via max(.., 1).
            '清洗率': f"{((original - cleaned) / max(original, 1) * 100):.1f}%",
        })

    if rows:
        st.dataframe(pd.DataFrame(rows), use_container_width=True)

    # Charts share the same results dict.
    render_statistics_charts(results)

def render_statistics_charts(results: Dict[str, Any]):
    """Render the visual-analysis charts: format pie, chunk-size histogram, time bars."""

    st.markdown("---")
    st.subheader("📈 可视化分析")

    files = results.get('files', [])
    left, right = st.columns(2)

    with left:
        # Count input files by declared format.
        format_counts: Dict[str, int] = {}
        for file_result in files:
            fmt = file_result.get('format', '未知')
            format_counts[fmt] = format_counts.get(fmt, 0) + 1
        if format_counts:
            fig_format = px.pie(
                values=list(format_counts.values()),
                names=list(format_counts.keys()),
                title="文件格式分布"
            )
            st.plotly_chart(fig_format, use_container_width=True)

    with right:
        # Histogram over every chunk length across all files.
        sizes = [
            len(chunk.get('content', ''))
            for file_result in files
            for chunk in file_result.get('chunks', [])
        ]
        if sizes:
            fig_chunks = px.histogram(
                x=sizes,
                title="切片大小分布",
                labels={'x': '切片大小（字符数）', 'y': '数量'}
            )
            st.plotly_chart(fig_chunks, use_container_width=True)

    # Per-file processing-time bar chart, only when timing info exists.
    if results.get('processing_time'):
        st.markdown("**⏱️ 处理时间分析**")

        time_rows = [
            {
                '文件名': file_result.get('metadata', {}).get('title', file_result.get('filename', '未知')),
                '处理时间（秒）': file_result.get('processing_time', 0),
            }
            for file_result in files
            if 'processing_time' in file_result
        ]
        if time_rows:
            fig_time = px.bar(
                pd.DataFrame(time_rows),
                x='文件名',
                y='处理时间（秒）',
                title="各文件处理时间"
            )
            st.plotly_chart(fig_time, use_container_width=True)

def render_document_content(results: Dict[str, Any]):
    """Render the document-content tab: file picker, summary badges, preview modes."""

    st.subheader("📄 文档内容预览")

    files = results.get('files', [])
    if not files:
        st.info("暂无文档内容")
        return

    # Build display labels; fall back title -> filename -> positional name.
    labels = [
        f"{idx + 1}. {f.get('metadata', {}).get('title', f.get('filename', f'文件{idx + 1}'))}"
        for idx, f in enumerate(files)
    ]
    chosen = st.selectbox(
        "选择要预览的文件",
        range(len(labels)),
        format_func=lambda x: labels[x]
    )
    selected_file = files[chosen]

    # Quick summary badges for the chosen file.
    info_cols = st.columns(3)
    with info_cols[0]:
        st.info(f"**格式**: {selected_file.get('format', '未知')}")
    with info_cols[1]:
        st.info(f"**字符数**: {selected_file.get('stats', {}).get('total_characters', 0):,}")
    with info_cols[2]:
        st.info(f"**切片数**: {len(selected_file.get('chunks', []))}")

    preview_option = st.radio(
        "预览内容",
        ["原始内容", "清洗后内容", "结构信息"],
        horizontal=True
    )

    # Dispatch table replaces the if/elif chain; structure info is the default.
    renderers = {
        "原始内容": render_original_content,
        "清洗后内容": render_cleaned_content,
    }
    renderers.get(preview_option, render_structure_info)(selected_file)

def render_original_content(file_result: Dict[str, Any]):
    """Show the raw (pre-cleaning) document text, truncated to a user-chosen length."""

    original_content = file_result.get('original_content', '')
    if not original_content:
        st.warning("暂无原始内容")
        return

    st.markdown("**原始文档内容**")

    # User controls how much text is shown (500-5000 chars, step 500).
    max_length = st.slider("显示长度", 500, 5000, 2000, 500)

    if len(original_content) > max_length:
        st.text_area(
            "内容预览（已截断）",
            original_content[:max_length] + "\n\n[内容已截断...]",
            height=400
        )
        st.info(f"完整内容长度: {len(original_content):,} 字符")
    else:
        st.text_area("完整内容", original_content, height=400)

def render_cleaned_content(file_result: Dict[str, Any]):
    """Show the cleaned document text plus cleaning statistics, if available."""

    cleaned_content = file_result.get('cleaned_content', '')
    if not cleaned_content:
        st.warning("暂无清洗后内容")
        return

    st.markdown("**清洗后文档内容**")

    # Cleaning metrics only render when the processor recorded them.
    stats = file_result.get('stats', {})
    if 'cleaning_stats' in stats:
        cleaning_stats = stats['cleaning_stats']
        metric_cols = st.columns(3)
        with metric_cols[0]:
            st.metric("移除字符数", cleaning_stats.get('removed_characters', 0))
        with metric_cols[1]:
            st.metric("移除行数", cleaning_stats.get('removed_lines', 0))
        with metric_cols[2]:
            st.metric("清洗操作数", cleaning_stats.get('operations_count', 0))

    # Explicit key avoids colliding with the identical slider on the raw tab.
    max_length = st.slider("显示长度", 500, 5000, 2000, 500, key="cleaned_length")

    if len(cleaned_content) > max_length:
        st.text_area(
            "内容预览（已截断）",
            cleaned_content[:max_length] + "\n\n[内容已截断...]",
            height=400
        )
        st.info(f"完整内容长度: {len(cleaned_content):,} 字符")
    else:
        st.text_area("完整内容", cleaned_content, height=400)

def render_structure_info(file_result: Dict[str, Any]):
    """Show extracted document structure: heading table, table count, image count."""

    structure = file_result.get('structure', {})
    if not structure:
        st.warning("暂无结构信息")
        return

    st.markdown("**文档结构信息**")

    # Heading hierarchy as a dataframe.
    headings = structure.get('headings')
    if headings:
        st.markdown("**📋 标题结构**")
        heading_rows = [
            {
                '级别': h.get('level', 0),
                '标题': h.get('title', ''),
                # Prefer line number; fall back to page number.
                '位置': h.get('line_number', 0) or h.get('page', 0),
            }
            for h in headings
        ]
        st.dataframe(pd.DataFrame(heading_rows), use_container_width=True)

    left, right = st.columns(2)

    with left:
        if 'tables' in structure:
            tables = structure['tables']
            st.metric("表格数量", len(tables))
            if tables:
                st.markdown("**表格信息**")
                # Only the first three tables are expanded here.
                for idx, table in enumerate(tables[:3]):
                    with st.expander(f"表格 {idx + 1}"):
                        st.json(table)

    with right:
        if 'images' in structure:
            images = structure['images']
            st.metric("图片数量", len(images))
            if images:
                st.markdown("**图片信息**")
                # Only the first three images are expanded here.
                for idx, image in enumerate(images[:3]):
                    with st.expander(f"图片 {idx + 1}"):
                        st.json(image)

def render_chunks_preview(results: Dict[str, Any]):
    """Render the chunk-preview tab: per-file chunk stats and a paginated chunk list.

    Args:
        results: dict with a 'files' list; each file carries 'chunks' whose
            items have 'content' and optional 'id'/'type'/'metadata'.

    Bug fix: the original opened an ``st.expander("元数据详情")`` inside the
    per-chunk expander.  Streamlit forbids nesting expanders and raises a
    ``StreamlitAPIException`` at render time whenever a chunk has metadata.
    The metadata is now rendered inline with ``st.json``.
    """

    st.subheader("✂️ 文档切片预览")

    files = results.get('files', [])
    if not files:
        st.info("暂无切片数据")
        return

    # File picker; label falls back title -> filename -> positional name.
    file_options = [f"{i+1}. {file_result.get('metadata', {}).get('title', file_result.get('filename', f'文件{i+1}'))}" for i, file_result in enumerate(files)]
    selected_file_idx = st.selectbox(
        "选择要预览切片的文件",
        range(len(file_options)),
        format_func=lambda x: file_options[x],
        key="chunk_file_select"
    )

    selected_file = files[selected_file_idx]
    chunks = selected_file.get('chunks', [])

    if not chunks:
        st.warning("该文件暂无切片数据")
        return

    # Length stats, computed once (chunks is guaranteed non-empty here).
    chunk_lengths = [len(chunk.get('content', '')) for chunk in chunks]
    col1, col2, col3, col4 = st.columns(4)
    with col1:
        st.metric("切片总数", len(chunks))
    with col2:
        st.metric("平均大小", f"{sum(chunk_lengths) / len(chunks):.0f}")
    with col3:
        st.metric("最小切片", min(chunk_lengths))
    with col4:
        st.metric("最大切片", max(chunk_lengths))

    st.markdown("---")
    st.markdown("**切片列表**")

    # Pagination: page selector only appears when more than one page exists.
    chunks_per_page = st.selectbox("每页显示切片数", [5, 10, 20, 50], index=1)
    total_pages = (len(chunks) - 1) // chunks_per_page + 1

    if total_pages > 1:
        page = st.selectbox("页码", range(1, total_pages + 1))
        start_idx = (page - 1) * chunks_per_page
        end_idx = min(start_idx + chunks_per_page, len(chunks))
        page_chunks = chunks[start_idx:end_idx]
    else:
        page_chunks = chunks
        start_idx = 0

    for i, chunk in enumerate(page_chunks):
        chunk_idx = start_idx + i
        content = chunk.get('content', '')

        with st.expander(f"切片 {chunk_idx + 1} ({len(content)} 字符)"):
            # Chunk metadata summary.
            meta_col1, meta_col2 = st.columns(2)
            with meta_col1:
                st.markdown(f"**ID**: {chunk.get('id', f'chunk_{chunk_idx}')}")
                st.markdown(f"**类型**: {chunk.get('type', '未知')}")
            with meta_col2:
                st.markdown(f"**字符数**: {len(content)}")
                if 'metadata' in chunk:
                    st.markdown(f"**元数据**: {len(chunk['metadata'])} 项")

            # Chunk content, truncated to 500 chars for display.
            if content:
                preview_length = min(500, len(content))
                if len(content) > preview_length:
                    st.text_area(
                        "内容预览",
                        content[:preview_length] + "\n\n[内容已截断...]",
                        height=150,
                        key=f"chunk_content_{chunk_idx}"
                    )
                else:
                    st.text_area(
                        "完整内容",
                        content,
                        height=150,
                        key=f"chunk_content_{chunk_idx}"
                    )

            # Fixed: expanders cannot be nested, so show metadata inline.
            if chunk.get('metadata'):
                st.markdown("**元数据详情**")
                st.json(chunk['metadata'])

def render_quality_analysis(results: Dict[str, Any]):
    """Render the quality tab: headline quality metrics, per-file table, diagnostics."""

    st.subheader("📈 处理质量分析")

    files = results.get('files', [])
    if not files:
        st.info("暂无质量分析数据")
        return

    st.markdown("**🎯 整体质量指标**")

    metric_cols = st.columns(4)

    total_files = len(files)
    successful_files = sum(1 for f in files if f.get('status') == 'success')

    with metric_cols[0]:
        # Share of files whose status is 'success'.
        success_rate = (successful_files / total_files * 100) if total_files > 0 else 0
        st.metric("处理成功率", f"{success_rate:.1f}%")

    with metric_cols[1]:
        # A chunk counts as "valid" when it holds more than 50 characters.
        all_chunks = [c for f in files for c in f.get('chunks', [])]
        valid_chunks = sum(1 for c in all_chunks if len(c.get('content', '')) > 50)
        chunk_quality = (valid_chunks / len(all_chunks) * 100) if all_chunks else 0
        st.metric("切片质量", f"{chunk_quality:.1f}%")

    with metric_cols[2]:
        # Mean per-file cleaning rate; max(.., 1) avoids division by zero.
        per_file_rates = []
        for f in files:
            s = f.get('stats', {})
            original = s.get('original_characters', 0)
            final = s.get('total_characters', 0)
            per_file_rates.append((original - final) / max(original, 1) * 100)
        avg_cleaning_rate = sum(per_file_rates) / len(files) if files else 0
        st.metric("平均清洗率", f"{avg_cleaning_rate:.1f}%")

    with metric_cols[3]:
        total_time = sum(f.get('processing_time', 0) for f in files)
        avg_time = total_time / len(files) if files else 0
        st.metric("平均处理时间", f"{avg_time:.2f}s")

    st.markdown("---")
    st.markdown("**📊 详细质量分析**")

    # One table row per file with derived quality figures.
    quality_rows = []
    for file_result in files:
        stats = file_result.get('stats', {})
        chunks = file_result.get('chunks', [])

        original_chars = stats.get('original_characters', 0)
        final_chars = stats.get('total_characters', 0)
        cleaning_rate = ((original_chars - final_chars) / max(original_chars, 1) * 100) if original_chars > 0 else 0

        sizes = [len(c.get('content', '')) for c in chunks]
        mean_size = sum(sizes) / len(sizes) if sizes else 0
        # Population variance of chunk sizes; large values mean uneven chunks.
        size_variance = sum((s - mean_size) ** 2 for s in sizes) / len(sizes) if sizes else 0

        quality_rows.append({
            '文件名': file_result.get('metadata', {}).get('title', file_result.get('filename', '未知')),
            '状态': '✅ 成功' if file_result.get('status') == 'success' else '❌ 失败',
            '清洗率': f"{cleaning_rate:.1f}%",
            '切片数': len(chunks),
            '平均切片大小': f"{mean_size:.0f}",
            '大小方差': f"{size_variance:.0f}",
            '处理时间': f"{file_result.get('processing_time', 0):.2f}s",
        })

    if quality_rows:
        st.dataframe(pd.DataFrame(quality_rows), use_container_width=True)

    st.markdown("---")
    render_issue_diagnosis(files)

def render_issue_diagnosis(files: List[Dict[str, Any]]):
    """Scan processing results for common problems and show diagnostics plus suggestions."""

    st.markdown("**🔍 问题诊断**")

    issues: List[Dict[str, str]] = []

    def note(kind: str, fname: str, msg: str) -> None:
        # Collect one diagnostic record; 'kind' selects the display style.
        issues.append({'type': kind, 'file': fname, 'message': msg})

    for file_result in files:
        filename = file_result.get('metadata', {}).get('title', file_result.get('filename', '未知文件'))

        # Failed files.
        if file_result.get('status') != 'success':
            note('error', filename, f"文件处理失败: {file_result.get('error', '未知错误')}")

        # Chunk-level problems: too small (<50), too large (>5000), empty.
        chunks = file_result.get('chunks', [])
        if chunks:
            lengths = [len(c.get('content', '')) for c in chunks]
            small = sum(1 for n in lengths if n < 50)
            if small:
                note('warning', filename, f"发现 {small} 个过小的切片（<50字符）")
            large = sum(1 for n in lengths if n > 5000)
            if large:
                note('warning', filename, f"发现 {large} 个过大的切片（>5000字符）")
            empty = sum(1 for c in chunks if not c.get('content', '').strip())
            if empty:
                note('warning', filename, f"发现 {empty} 个空切片")

        # Cleaning-rate sanity: too aggressive (>50%) or barely any (<5%).
        stats = file_result.get('stats', {})
        original_chars = stats.get('original_characters', 0)
        final_chars = stats.get('total_characters', 0)
        if original_chars > 0:
            cleaning_rate = (original_chars - final_chars) / original_chars * 100
            if cleaning_rate > 50:
                note('warning', filename, f"清洗率过高（{cleaning_rate:.1f}%），可能过度清洗")
            elif cleaning_rate < 5:
                note('info', filename, f"清洗率较低（{cleaning_rate:.1f}%），文档可能已经很干净")

    # Display every diagnostic via the matching Streamlit widget.
    if issues:
        display = {'error': st.error, 'warning': st.warning}
        for issue in issues:
            show = display.get(issue['type'], st.info)
            show(f"**{issue['file']}**: {issue['message']}")
    else:
        st.success("✅ 未发现明显问题，处理质量良好！")

    st.markdown("---")
    st.markdown("**💡 优化建议**")

    suggestions = []

    # Suggestions derived from the diagnostics above.
    if any(issue['type'] == 'error' for issue in issues):
        suggestions.append("检查失败文件的格式和完整性，确保文件未损坏")
    if any('过小的切片' in issue['message'] for issue in issues):
        suggestions.append("考虑增加最小切片大小或启用切片合并功能")
    if any('过大的切片' in issue['message'] for issue in issues):
        suggestions.append("考虑减小切片大小或使用更细粒度的切片策略")
    if any('清洗率过高' in issue['message'] for issue in issues):
        suggestions.append("调整清洗参数，减少不必要的内容移除")

    # General suggestions from aggregate figures.
    if sum(len(f.get('chunks', [])) for f in files) > 1000:
        suggestions.append("切片数量较多，建议优化切片策略以提高处理效率")

    avg_processing_time = sum(f.get('processing_time', 0) for f in files) / len(files) if files else 0
    if avg_processing_time > 10:
        suggestions.append("处理时间较长，建议优化文档大小或处理参数")

    if suggestions:
        for suggestion in suggestions:
            st.markdown(f"• {suggestion}")
    else:
        st.markdown("当前配置和处理结果已经很好，无需特别优化。")