import streamlit as st
import os
import tempfile
from datetime import datetime
from io import BytesIO

# Import our project modules from the 'src' directory
from src.resume_parser import ResumeParser
from src.ai_analyzer import DeepSeekAnalyzer
from src.batch_processor import BatchProcessor
from src.excel_exporter import ExcelExporter
import pandas as pd  # pandas is used to build the results table in the batch tab

# --- App Configuration ---
# set_page_config must be the first Streamlit call executed in the script.
st.set_page_config(
    page_title="AI 简历筛选系统",
    page_icon="🤖",
    layout="wide",
)

# --- Constants ---
UPLOADS_DIR = "uploads"
REPORTS_DIR = "reports"

# --- Initialization ---
# Make sure the working directories exist before anything tries to use them.
for _dir in (UPLOADS_DIR, REPORTS_DIR):
    os.makedirs(_dir, exist_ok=True)

@st.cache_resource
def load_components():
    """
    Build the parser/analyzer/exporter/processor stack exactly once.

    st.cache_resource memoizes the returned objects across Streamlit
    reruns, so construction happens only on the first script execution.

    Returns:
        tuple: (ResumeParser, DeepSeekAnalyzer, ExcelExporter, BatchProcessor)
    """
    resume_parser = ResumeParser()
    deepseek_analyzer = DeepSeekAnalyzer()
    excel_exporter = ExcelExporter()
    # The batch processor composes the parser and analyzer.
    batch_processor = BatchProcessor(resume_parser, deepseek_analyzer)
    return resume_parser, deepseek_analyzer, excel_exporter, batch_processor

parser, analyzer, exporter, processor = load_components()

# --- Sidebar ---
# Collects the job description and the screening knobs used by both tabs.
with st.sidebar:
    st.header("⚙️ 职位要求设置")

    job_title = st.text_input("职位名称", "Python开发工程师")

    # Pre-filled example requirements the user can edit freely.
    _default_requirements = """
- 3年以上Python开发经验
- 熟悉Django/Flask框架
- 熟悉MySQL数据库
- 有RESTful API开发经验
- 本科及以上学历,计算机相关专业
"""
    job_requirements = st.text_area("职位要求", _default_requirements, height=200)

    st.markdown("---")
    st.header("🛠️ 筛选设置")
    # min_score is the pass/fail threshold; max_workers caps batch concurrency.
    min_score = st.slider("最低分数线", 0, 100, 60)
    max_workers = st.slider("并发处理数", 1, 8, 4)

# --- Main Interface ---
# Two-column header: brand on the left, product title on the right.
brand_col, title_col = st.columns([1, 3])  # adjust the ratio as needed
with brand_col:
    st.title("白菜-AI")
with title_col:
    st.title("智能简历筛选系统")
st.markdown("一个基于大模型的智能简历分析与匹配工具")

tab1, tab2, tab3 = st.tabs(["📄 单份简历分析", "📂 批量简历处理", "ℹ️ 使用说明"])

# --- Single Resume Analysis Tab ---
with tab1:
    st.header("上传单份简历进行分析")

    uploaded_file = st.file_uploader(
        "支持 PDF、Word、文本格式",
        type=['pdf', 'docx', 'txt'],
        accept_multiple_files=False
    )

    if uploaded_file:
        if st.button("🚀 开始分析", type="primary", key="single_analyze"):
            # Write the upload to a named temp file only when analysis is
            # actually requested. Streamlit reruns the whole script on every
            # interaction, so creating the file unconditionally (as before)
            # leaked one temp file per rerun — os.unlink below only runs on
            # this analyze path.
            with tempfile.NamedTemporaryFile(delete=False, suffix=f"_{uploaded_file.name}") as tmp_file:
                tmp_file.write(uploaded_file.getvalue())
                tmp_file_path = tmp_file.name

            with st.spinner("正在分析简历，请稍候..."):
                try:
                    result = processor.process_single_resume(tmp_file_path, job_requirements)

                    # Headline metrics; .get with a default of 0 guards
                    # against keys missing from the analyzer's result dict.
                    st.subheader("📊 分析结果")
                    col1, col2, col3, col4 = st.columns(4)
                    col1.metric("综合评分", f"{result.get('overall_score', 0)}分")
                    col2.metric("技能匹配", f"{result.get('skill_match', 0)}分")
                    col3.metric("经验匹配", f"{result.get('experience_match', 0)}分")
                    col4.metric("教育背景", f"{result.get('education_match', 0)}分")

                    st.subheader("📝 详细评估")
                    col1, col2 = st.columns(2)
                    with col1:
                        st.markdown("**✅ 推荐理由:**")
                        st.info(result.get('recommendation', 'N/A'))
                    with col2:
                        st.markdown("**⚠️ 需要关注:**")
                        st.warning(result.get('concerns', 'N/A'))

                    # Verdict against the sidebar threshold.
                    if result.get('overall_score', 0) >= min_score:
                        st.success(f"🎉 **推荐面试!** 综合评分高于最低分数线 {min_score} 分。")
                    else:
                        st.error(f"❌ **不建议面试。** 综合评分低于最低分数线 {min_score} 分。")

                except Exception as e:
                    # Surface any parser/analyzer failure to the user instead
                    # of crashing the Streamlit script.
                    st.error(f"分析过程中出现错误: {e}")
                finally:
                    # Always remove the temp file, success or failure.
                    os.unlink(tmp_file_path)

# --- Batch Resume Processing Tab ---
with tab2:
    st.header("上传多份简历进行批量处理")

    uploaded_files = st.file_uploader(
        "可以一次上传多个文件 (PDF, DOCX, TXT)",
        type=['pdf', 'docx', 'txt'],
        accept_multiple_files=True
    )

    if uploaded_files:
        st.info(f"已成功上传 {len(uploaded_files)} 份简历。")

        if st.button("🚀 开始批量处理", type="primary", key="batch_analyze"):
            with tempfile.TemporaryDirectory() as temp_dir:
                # Materialize the uploads on disk so the processor can read
                # them by path; the directory is removed automatically on exit.
                file_paths = []
                for file in uploaded_files:
                    path = os.path.join(temp_dir, file.name)
                    with open(path, 'wb') as f:
                        f.write(file.getvalue())
                    file_paths.append(path)

                # Process the whole directory in one batch call.
                with st.spinner(f"正在批量处理 {len(file_paths)} 份简历..."):
                    results = processor.process_batch(temp_dir, job_requirements, max_workers)

                if results:
                    # Summary metrics across all processed resumes.
                    st.subheader("📈 处理结果汇总")
                    total_count = len(results)
                    qualified_count = sum(1 for r in results if r.get('overall_score', 0) >= min_score)
                    avg_score = sum(r.get('overall_score', 0) for r in results) / total_count if total_count > 0 else 0

                    col1, col2, col3, col4 = st.columns(4)
                    col1.metric("总简历数", total_count)
                    col2.metric("合格简历数", qualified_count)
                    col3.metric("合格率", f"{qualified_count/total_count:.1%}" if total_count > 0 else "0%")
                    col4.metric("平均分", f"{avg_score:.1f}")

                    # Per-resume table with a pass/fail status column.
                    st.subheader("📄 详细结果")
                    df = pd.DataFrame(results)
                    df['状态'] = df['overall_score'].apply(lambda x: '✅ 推荐' if x >= min_score else '❌ 淘汰')
                    display_df = df[['file_name', 'overall_score', 'skill_match', 'experience_match', 'education_match', '状态', 'recommendation', 'concerns']]
                    display_df.columns = ['文件名', '综合评分', '技能匹配', '经验匹配', '教育背景', '状态', '推荐理由', '关注点']
                    st.dataframe(display_df, use_container_width=True)

                    # --- Download Button ---
                    output_buffer = BytesIO()
                    exporter.export(results, output_buffer)
                    # After export writes into the buffer the stream position
                    # sits at the end; pass the raw bytes via getvalue() so
                    # the downloaded file is complete regardless of position.
                    st.download_button(
                        label="📥 下载Excel报告",
                        data=output_buffer.getvalue(),
                        file_name=f"简历筛选报告_{datetime.now().strftime('%Y%m%d_%H%M%S')}.xlsx",
                        mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
                    )
                else:
                    st.warning("处理完成，但没有生成任何结果。")

# --- Instructions Tab ---
# Static help text; no interactive widgets here.
with tab3:
    st.header("使用说明")
    _usage_md = """
    ### 🚀 使用步骤
    1.  **设置职位要求**: 在左侧面板填写职位名称和具体要求。
    2.  **上传简历**:
        - 在“单份简历分析”标签页上传单个文件进行快速评估。
        - 在“批量简历处理”标签页上传多个文件进行批量处理和报告生成。
    3.  **开始分析**: 点击分析按钮，等待AI处理。
    4.  **查看结果**: 系统会给出详细的匹配分析和分数。
    5.  **下载报告**: 在批量处理完成后，可以下载Excel格式的汇总报告。

    ### 📊 评分说明
    - **技能匹配度**: 候选人技能与职位要求的匹配程度。
    - **经验匹配度**: 工作经验、项目经验与职位要求的匹配程度。
    - **教育背景**: 学历和专业背景的匹配程度。
    - **综合评分**: 基于以上三个维度的综合评估。

    ### 💡 性能建议
    - 批量处理时，建议每次上传的简历数量不要过多（如，不超过50份），以避免等待时间过长。
    - 如果处理速度较慢，可以适当降低“并发处理数”。
    """
    st.markdown(_usage_md)