# -*- coding: utf-8 -*-
"""Streamlit app for scientific-literature term extraction, clustering and topic labeling."""
import json
import os
import re
import time
import urllib.parse

import pandas as pd
import streamlit as st
from dotenv import load_dotenv
from streamlit_echarts import st_echarts

from modules.clustering import Clusterer
from modules.term_extraction import TermExtractor
from modules.topic_labeling import TopicLabeler
from utils.time_utils import format_time, estimate_remaining_time

# Load LLM credentials (LLM_API_KEY / LLM_BASE_URL / LLM_MODEL_NAME) from a .env file
load_dotenv()

def main() -> None:
    """Entry point of the Streamlit app.

    Renders one of three workflow steps chosen from a sidebar radio:
    term extraction (CSV -> term list via an LLM), clustering with
    VOSviewer visualization, and topic-label generation with an optional
    ECharts network chart.
    """
    st.set_page_config(page_title="科技文献聚类分析系统", layout="wide")

    st.title("科技文献聚类与主题标签揭示软件")

    # Step selector in the sidebar
    step = st.sidebar.radio("选择步骤", ["术语抽取", "聚类及可视化", "主题标签揭示"])

    # ---------------------------------------------------------------- Step 1
    if step == "术语抽取":
        st.header("术语抽取")

        # File upload in the main pane
        uploaded_file = st.file_uploader("上传CSV文件", type=['csv'])

        # LLM connection parameters in the sidebar
        with st.sidebar:
            st.subheader("参数设置")
            api_key = st.text_input("API Key", type="password", value=os.getenv("LLM_API_KEY"))
            base_url = st.text_input("Base URL", value=os.getenv("LLM_BASE_URL"))
            model_name = st.text_input("模型名称", value=os.getenv("LLM_MODEL_NAME"))
            col1, col2 = st.columns(2)
            with col1:
                start_button = st.button("RUN")

        results = []
        error_indices = []

        if start_button and uploaded_file is not None:
            # Placeholders so all progress widgets can be cleared on error
            progress_placeholder = st.empty()
            status_placeholder = st.empty()
            time_placeholder = st.empty()
            estimate_placeholder = st.empty()

            try:
                # Load the CSV and count records for progress reporting
                df = pd.read_csv(uploaded_file)
                total_records = len(df)

                with progress_placeholder.container():
                    progress_bar = st.progress(0)

                start_time = time.time()

                extractor = TermExtractor(
                    api_key,
                    base_url,
                    model_name,
                )

                def update_progress(i: int) -> None:
                    """Refresh the progress bar, status line and time estimates for row *i*."""
                    current = i + 1
                    progress_bar.progress(current / total_records)
                    status_placeholder.text(f"正在处理: {current}/{total_records}")

                    elapsed_time = time.time() - start_time
                    time_placeholder.text(f"已用时间: {format_time(elapsed_time)}")

                    remaining_time = estimate_remaining_time(elapsed_time, current, total_records)
                    estimate_placeholder.text(f"预计剩余时间: {remaining_time}")

                # Process rows one by one so a single failure does not abort the run.
                # NOTE(review): assumes extract_terms_single handles one row — confirm in TermExtractor.
                for i, row in df.iterrows():
                    update_progress(i)
                    try:
                        response = extractor.extract_terms_single(row)
                        results.append(response.strip())
                    except Exception as e:
                        # Record the failure and append a blank so row alignment is preserved
                        error_indices.append(i)
                        extractor.log_error(i, row, str(e))
                        results.append("")

                if len(error_indices) > 0:
                    st.error(f"处理过程中出现 {len(error_indices)} 条记录错误")

                    # Offer the failed records for download
                    error_df = extractor.get_error_records_df()
                    st.download_button(
                        "下载出错记录",
                        error_df.to_csv(index=False).encode('utf-8'),
                        "error_records.csv",
                        "text/csv"
                    )

                # Failed rows stay in as empty strings to keep alignment with the input CSV
                valid_results = pd.DataFrame({'terms': results})

                st.write(f"已处理{total_records} 条记录，失败 {len(error_indices)} 条记录")
                st.dataframe(valid_results)

                # Exported as .txt because the clustering step consumes a txt term list
                st.download_button(
                    "下载提取结果",
                    valid_results.to_csv(index=False).encode('utf-8'),
                    "extracted_terms.txt",
                    "text/plain"
                )

            except Exception as e:
                st.error(f"程序发生错误: {str(e)}")
                progress_placeholder.empty()

    # ---------------------------------------------------------------- Step 2
    elif step == "聚类及可视化":
        st.header("聚类及可视化")

        terms_file = st.file_uploader("上传术语列表", type=['txt'])

        # Clustering parameters in the sidebar
        with st.sidebar:
            st.subheader("参数设置")
            st.subheader("聚类参数")

            col1, col2 = st.columns(2)
            with col1:
                num_nodes = st.number_input("节点个数", value=300)
            with col2:
                num_relations = st.number_input("关系个数", value=100000)

            start_button = st.button("RUN")

        if start_button and terms_file is not None:
            progress_placeholder = st.empty()
            status_placeholder = st.empty()

            try:
                with progress_placeholder.container():
                    progress_bar = st.progress(0)

                # The percentages below are coarse milestones, not measured progress
                status_placeholder.text("正在初始化聚类器...")
                progress_bar.progress(20)

                clusterer = Clusterer(
                    num_nodes=num_nodes,
                    num_relations=num_relations
                )

                status_placeholder.text("正在处理数据并进行聚类...")
                progress_bar.progress(40)

                vosviewer_url = clusterer.process(terms_file)

                status_placeholder.text("正在生成可视化...")
                progress_bar.progress(80)

                progress_bar.progress(100)
                status_placeholder.text("处理完成！")
                time.sleep(0.1)  # let the "done" state render before clearing

                progress_placeholder.empty()
                status_placeholder.empty()

                st.header("可视化聚类结果")

                # Embed the VOSviewer result page inside the app
                import streamlit.components.v1 as components
                components.iframe(vosviewer_url, height=800)

            except Exception as e:
                status_placeholder.error(f"处理过程中发生错误: {str(e)}")
                progress_placeholder.empty()

    # ---------------------------------------------------------------- Step 3
    else:
        st.header("主题标签揭示")

        # Persist results across Streamlit reruns
        if 'topics_text' not in st.session_state:
            st.session_state.topics_text = None
        if 'topics' not in st.session_state:
            st.session_state.topics = None

        # LLM and output settings in the sidebar
        with st.sidebar:
            st.subheader("参数设置")
            api_key = st.text_input("API Key", type="password", value=os.getenv("LLM_API_KEY"))
            base_url = st.text_input("Base URL", value=os.getenv("LLM_BASE_URL"))
            model_name = st.text_input("模型名称", value=os.getenv("LLM_MODEL_NAME"))

            st.subheader("输出设置")
            use_stream = st.checkbox("使用流式输出", value=True, help="实时显示AI生成的内容，提供更好的交互体验")

            if use_stream:
                st.info("💡 流式输出模式：将实时显示AI的思考和分析过程")
            else:
                st.info("⚡ 标准模式：一次性显示完整结果")

        # Upload the clustering result file
        st.info("""
        **支持的文件格式：**
        - **TXT格式**：每行格式为 `cluster_id\t术语1;术语2;术语3`（用于主题标签生成）
        - **JSON格式**：VOSviewer网络JSON格式（可同时用于主题标签生成和可视化）
        
        💡 **提示**：上传一个JSON格式文件即可完成主题标签分析和可视化，无需重复上传！
        """)
        cluster_file = st.file_uploader("上传聚类数据文件", type=['txt', 'json'])
        get_topics_button = st.button("获取主题标签")

        if get_topics_button and cluster_file is not None:
            try:
                labeler = TopicLabeler(api_key, base_url, model_name)
                # Rewind in case the buffer was read earlier in this rerun
                cluster_file.seek(0)

                if use_stream:
                    st.write("🤖 AI正在分析聚类结果并生成主题标签...")

                    # Empty container that the stream keeps overwriting
                    stream_container = st.empty()
                    full_response = ""

                    cluster_file.seek(0)

                    try:
                        for chunk in labeler.label_topics_stream(cluster_file):
                            full_response += chunk
                            stream_container.markdown(f"""
                            **🤖 AI分析结果：**
                            
                            {full_response}
                            
                            ---
                            *正在生成中...*
                            """)
                            time.sleep(0.01)  # small delay for a typewriter feel

                        # Store the completed answer for later reruns
                        st.session_state.topics_text = full_response

                        # Final render without the "generating" footer
                        stream_container.markdown(f"""
                        **🤖 AI分析结果：**
                        
                        {full_response}
                        
                        ---
                        ✅ **生成完成！**
                        """)

                        st.success("✅ 主题标签生成完成！")

                    except Exception as stream_error:
                        # Fall back to the non-streaming API on any streaming failure
                        st.error(f"流式输出过程中发生错误：{str(stream_error)}")
                        st.warning("正在使用普通模式重试...")
                        cluster_file.seek(0)
                        st.session_state.topics_text = labeler.label_topics(cluster_file)
                        st.success("主题标签获取成功！")
                else:
                    # Non-streaming: block with a spinner and show the full result at once
                    with st.spinner("正在生成主题标签..."):
                        st.session_state.topics_text = labeler.label_topics(cluster_file)
                    st.success("主题标签获取成功！")

                # Labels are expected inside 《...》 brackets; join them with ';'
                if st.session_state.topics_text:
                    topics_text_str = str(st.session_state.topics_text)
                    st.session_state.topics = ';'.join(re.findall(r'《(.*?)》', topics_text_str))
                else:
                    st.session_state.topics = ""

            except Exception as e:
                st.error(f"处理文件时发生错误：{str(e)}")
                st.session_state.topics_text = None
                st.session_state.topics = None

        # Show stored results (survives reruns via session_state)
        if st.session_state.topics_text:
            # Streaming mode already rendered the text above
            if not use_stream:
                st.subheader("🎯 主题标签分析结果")
                st.write(st.session_state.topics_text)

            col1, col2 = st.columns([1, 4])
            with col1:
                st.download_button(
                    "📥 下载结果",
                    str(st.session_state.topics_text).encode('utf-8'),
                    "topics_text.txt",
                    "text/plain",
                    use_container_width=True
                )

            # The chart needs the uploaded file; the uploader may have been
            # cleared by a rerun even though topics_text persisted.
            if cluster_file is not None:
                st.subheader("🎨 生成可视化图表")

                cluster_file.seek(0)
                file_content = cluster_file.read().decode('utf-8')
                cluster_file.seek(0)

                # Only VOSviewer-style JSON with a network section supports charting
                is_json_with_network = False
                try:
                    data = json.loads(file_content)
                    if 'network' in data and 'items' in data['network']:
                        is_json_with_network = True
                except ValueError:
                    # Not JSON (e.g. a txt upload) — charting stays disabled
                    pass

                if is_json_with_network:
                    st.success("✅ 检测到包含网络数据的JSON文件，可以生成可视化图表！")
                    topics_input = st.text_area("编辑主题标签（用分号分隔）", st.session_state.topics or "")
                    visualize_button = st.button("🚀 生成图表")
                else:
                    st.warning("⚠️ 当前文件不支持可视化功能。请上传包含网络数据的JSON文件（如VOSviewer格式）以启用可视化功能。")
                    topics_input = st.text_area("主题标签（仅供参考，无法生成图表）", st.session_state.topics or "", disabled=True)
                    visualize_button = None

                if visualize_button and topics_input:
                    # Split the (possibly user-edited) label string into a clean list
                    topic_list = [topic.strip() for topic in topics_input.split(';') if topic.strip()]

                    # Re-read and parse the graph JSON (format validated above)
                    cluster_file.seek(0)
                    file_content = cluster_file.read().decode('utf-8')
                    cluster_file.seek(0)
                    graph_data = json.loads(file_content)

                    # Map VOSviewer nodes/links into the ECharts graph format.
                    # VOSviewer clusters are 1-based; ECharts categories are 0-based.
                    nodes = [
                        {
                            'id': node['id'],
                            'name': node['label'],
                            'category': node['cluster'] - 1,
                            'x': node['x'],
                            'y': node['y'],
                        }
                        for node in graph_data['network']['items']
                    ]
                    links = [
                        {'source': link['source_id'], 'target': link['target_id']}
                        for link in graph_data['network']['links']
                    ]

                    # ECharts option: fixed layout using the VOSviewer coordinates
                    visualization = {
                        'title': {'text': '术语关系网络'},
                        'tooltip': {},
                        'legend': {'data': topic_list},
                        'series': [{
                            'type': 'graph',
                            "layout": "none",
                            "symbolSize": 10,
                            "circular": {
                                "rotateLabel": False
                            },
                            "force": {
                                "repulsion": 50,
                                "gravity": 0.2,
                                "edgeLength": 30,
                                "friction": 0.6,
                                "layoutAnimation": True
                            },
                            "label": {
                                "show": True,
                                "position": "inside",
                                "margin": 8,
                                "valueAnimation": False
                            },
                            "lineStyle": {
                                "show": True,
                                "width": 0.5,
                                "opacity": 0.7,
                                "curveness": 0.3,
                                "type": "solid"
                            },
                            "roam": True,
                            "draggable": False,
                            "focusNodeAdjacency": True,
                            'data': nodes,
                            'links': links,
                            'categories': [{'name': topic} for topic in topic_list],
                            'labelLayout': {
                                'hideOverlap': True
                            }
                        }]
                    }

                    st.write("关系图可视化：")
                    st_echarts(visualization, height="800px")
            else:
                # Uploader was cleared by a rerun; ask for a fresh upload
                st.warning("⚠️ 请重新上传聚类数据文件以生成可视化图表。")

# Script entry point: launch the Streamlit app when run directly.
if __name__ == "__main__":
    main() 