#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：academic_trend_analysis
@File    ：app.py
@IDE     ：PyCharm
@Author  ：iyoahs
@Date    ：2025/6/20 10:45
@Describe：Streamlit 交互式分析界面
'''
import functools
import json
import os
from datetime import datetime

import pandas as pd
import streamlit as st

from src.crawler.academic_crawler import ArxivCrawler
from src.analysis.hot_term_analyzer import AcademicKeywordSearch
from src.analysis.llm_analyzer import LLMAnalyzer
from src.utils.mongodb_utils import MongoDBUtils
from config.config import (
    MONGO_CONFIG, SPARK_CONFIG, LLM_CONFIG,
    KEYWORD_ANALYSIS_CONFIG, STREAMLIT_CONFIG,
    TOPICS, HOURS, MAX_RESULTS, LAST_INFO
)

# Streamlit requires set_page_config to be the first st.* call in the script;
# all values are sourced from config.config.STREAMLIT_CONFIG.
st.set_page_config(
    page_title=STREAMLIT_CONFIG["page_title"],
    page_icon=STREAMLIT_CONFIG["page_icon"],
    layout=STREAMLIT_CONFIG["layout"],
    initial_sidebar_state=STREAMLIT_CONFIG["initial_sidebar_state"]
)


def load_last_info():
    """Restore the previous run's state from the JSON snapshot file.

    Seeds missing session_state keys with defaults first, then — if the
    snapshot file exists — overwrites them with the persisted values.
    A failed read only emits a warning; the defaults remain in place.
    """
    defaults = {
        "analysis_results": None,
        "last_processed": None,
        "paper_count": 0,
    }
    for key, value in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value

    if not os.path.exists(LAST_INFO):
        return
    try:
        with open(LAST_INFO, "r", encoding="utf-8") as f:
            data = json.load(f)
        st.session_state.paper_count = data.get("paper_count", 0)
        st.session_state.analysis_results = data.get("analysis_results", None)
        st.session_state.last_processed = data.get("last_processed", None)
    except Exception as e:
        st.warning(f"无法读取上次信息: {e}")


def save_last_info():
    """Persist the current session state to the JSON snapshot file.

    Writes paper_count, analysis_results and last_processed; a failed
    write only emits a warning rather than aborting the page run.
    """
    snapshot = {
        "paper_count": st.session_state.get("paper_count", 0),
        "analysis_results": st.session_state.get("analysis_results"),
        "last_processed": st.session_state.get("last_processed"),
    }
    try:
        with open(LAST_INFO, "w", encoding="utf-8") as f:
            json.dump(snapshot, f, ensure_ascii=False, indent=2)
    except Exception as e:
        st.warning(f"保存失败: {e}")


@st.cache_resource
def init_components():
    """Build and cache the app's service singletons.

    Returns a dict with the MongoDB helper, LLM analyzer, arXiv crawler
    and keyword analyzer; @st.cache_resource keeps one shared instance
    of each across Streamlit reruns.
    """
    keyword_cfg = {
        "spark_config": SPARK_CONFIG,
        "special_char_map": KEYWORD_ANALYSIS_CONFIG["special_char_map"],
        "match_threshold": KEYWORD_ANALYSIS_CONFIG["match_threshold"],
        "default_keyword_file": "data/ai_research_keywords.csv",
        "default_output_file": "output/analysis_result.csv",
    }
    services = {}
    services["mongodb"] = MongoDBUtils(MONGO_CONFIG)
    services["llm"] = LLMAnalyzer(LLM_CONFIG)
    services["crawler"] = ArxivCrawler(MONGO_CONFIG)
    services["keyword_analyzer"] = AcademicKeywordSearch(keyword_cfg)
    return services


# Shared service singletons (cached across reruns by st.cache_resource).
components = init_components()

# Restore state from the previous run. load_last_info() already seeds the
# default session_state keys (analysis_results / last_processed / paper_count),
# so the former duplicate initialization block here was dead code and is removed.
load_last_info()


def show_processing_spinner(task):
    """Decorator factory: run the wrapped function inside a Streamlit spinner.

    Args:
        task: Label shown in the spinner ("{task}...") and in the success
              toast ("{task}完成!").

    Returns:
        A decorator whose wrapper passes through the wrapped function's
        return value unchanged.
    """
    def decorator(func):
        # functools.wraps preserves __name__/__doc__ of the wrapped function;
        # without it every decorated function reported itself as "wrapper".
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with st.spinner(f"{task}..."):
                result = func(*args, **kwargs)
                st.toast(f"{task}完成!", icon="✅")
                return result
        return wrapper
    return decorator


@show_processing_spinner("正在获取最新论文")
def fetch_recent_papers():
    """Crawl arXiv for recent papers and record the count in session state.

    Uses the TOPICS / HOURS / MAX_RESULTS settings from config; on failure
    shows an error message and re-raises so the spinner decorator stops.
    """
    try:
        recent = components["crawler"].crawl_recent_papers(
            query_terms=TOPICS,
            hours=HOURS,
            max_results=MAX_RESULTS,
        )
        st.session_state.paper_count = len(recent)
        st.success(f"成功获取 {len(recent)} 篇最新论文")
        save_last_info()
        return recent
    except Exception as e:
        st.error(f"获取论文失败: {str(e)}")
        raise


@show_processing_spinner("正在分析关键词")
def run_keyword_analysis():
    """Run the keyword hot-spot analysis and persist the ranked results.

    Sorts results by paper_count (descending), stores them in session
    state and MongoDB, and stamps last_processed with the current time.
    """
    try:
        raw_results = components["keyword_analyzer"].run_analysis()
        ranked = sorted(raw_results, key=lambda item: item["paper_count"], reverse=True)
        st.session_state.analysis_results = ranked
        st.session_state.last_processed = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        components["mongodb"].save_analysis_result(
            "keyword_trends",
            {"results": ranked},
        )
        save_last_info()
    except Exception as e:
        st.error(f"分析失败: {str(e)}")
        raise


def display_data_status():
    """Render three metric cards: paper count, keyword count, last update."""
    papers_col, keywords_col, updated_col = st.columns(3)
    results = st.session_state.analysis_results
    papers_col.metric("已获取论文数", st.session_state.paper_count)
    keywords_col.metric("分析关键词数", len(results) if results else 0)
    updated_col.metric("最后更新时间", st.session_state.last_processed or "尚未分析")


def display_trend_chart():
    """Show the top-10 keywords as a bar chart and as a formatted table.

    Warns and returns early when no analysis results are available yet.
    """
    results = st.session_state.analysis_results
    if not results:
        st.warning("请先运行关键词分析")
        return

    top_keywords = pd.DataFrame(results).head(10)
    st.subheader("🔥 热门研究关键词 Top 10")

    chart_tab, table_tab = st.tabs(["柱状图", "数据表格"])
    with chart_tab:
        st.bar_chart(
            top_keywords.set_index("keyword")["paper_count"],
            color="#FF4B4B",
            height=400,
        )
    with table_tab:
        st.dataframe(
            top_keywords[["keyword", "paper_count"]],
            column_config={
                "keyword": "关键词",
                "paper_count": st.column_config.NumberColumn(
                    "相关论文数",
                    help="匹配该关键词的论文数量",
                    format="%d 篇",
                ),
            },
            hide_index=True,
        )


def display_paper_details():
    """显示论文详情"""
    if not st.session_state.analysis_results:
        return
    selected_keyword = st.selectbox(
        "🔍 选择关键词查看论文详情",
        options=[r["keyword"] for r in st.session_state.analysis_results],
        index=0
    )
    if selected_keyword:
        matched_papers = components["mongodb"].get_papers_by_keywords(
            [selected_keyword],
            limit=20
        )
        if not matched_papers:
            st.warning(f"没有找到关于 '{selected_keyword}' 的论文")
            return
        st.subheader(f"📚 相关论文 ({len(matched_papers)}篇)")
        papers = []
        for i, paper in enumerate(matched_papers, 1):
            with st.expander(f"{i}. {paper['title']}", expanded=False):
                col1, col2 = st.columns([3, 1])
                with col1:
                    st.markdown(f"""
                    **作者**: {', '.join(paper['authors'][:5])}{'...' if len(paper['authors']) > 5 else ''}  
                    **发表时间**: {paper['published']}  
                    **摘要**: {paper['abstract']}
                    """)
                    p = {
                        "title": paper['title'],
                        "abstract": paper['abstract'],
                    }
                    papers.append(p)
                with col2:
                    if st.button("AI中文摘要", key=f"sum_{paper['id']}"):
                        with st.spinner("生成中..."):
                            summary = components["llm"].generate_summary(title=paper['title'], abstract=paper['abstract'])
                            st.info(f"**AI摘要**:\n{summary}")
                    st.link_button("查看原文", paper["url"])

        if st.button("生成趋势分析报告", key=f"report_{selected_keyword}"):
            with st.spinner("AI正在生成分析报告..."):
                # 生成趋势报告
                report = components["llm"].generate_report(
                    topic=selected_keyword,
                    papers=papers
                )
                st.markdown(f"## 📊 {selected_keyword} 研究趋势分析")
                st.markdown(report.strip("```markdown").strip("```"))


def main():
    """Top-level page layout: action buttons, status metrics, analysis views."""
    st.title("📈 科研大爆发——人工智能领域研究分析系统")
    st.markdown("---")

    # Action-button row: fetch, analyze, clear.
    with st.container():
        fetch_col, analyze_col, clear_col = st.columns([2, 2, 1])
        with fetch_col:
            fetch_clicked = st.button(
                "🔄 获取最新论文",
                help=f"从arXiv获取最近{HOURS}小时内{MAX_RESULTS}篇{TOPICS}领域的论文",
                type="primary",
            )
            if fetch_clicked:
                fetch_recent_papers()
                st.rerun()
        with analyze_col:
            analyze_clicked = st.button(
                "🔍 分析研究热点",
                disabled=st.session_state.paper_count == 0,
                help="分析当前数据库中的论文热点",
            )
            if analyze_clicked:
                run_keyword_analysis()
                st.rerun()
        with clear_col:
            clear_clicked = st.button(
                "🗑️ 清除缓存",
                help="清除所有分析Session缓存",
            )
            if clear_clicked:
                st.cache_resource.clear()
                st.session_state.clear()
                # Re-seed the session defaults after the wipe.
                st.session_state.paper_count = 0
                st.session_state.analysis_results = None
                st.session_state.last_processed = None
                # Drop the persisted snapshot as well.
                if os.path.exists(LAST_INFO):
                    os.remove(LAST_INFO)
                st.rerun()

    st.markdown("---")
    # Status cards.
    display_data_status()
    st.markdown("---")
    # Analysis result views.
    display_trend_chart()
    display_paper_details()


if __name__ == "__main__":
    # Script entry point (the app is presumably launched via `streamlit run` —
    # under which this guard also holds, since Streamlit executes the script
    # as __main__).
    main()