import streamlit as st
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from datetime import datetime, timedelta
import os
import json

class AnalysisView:
    """Streamlit view that renders analysis dashboards for AI email-extraction results.

    Reads the versioned Excel result file from the project's ``target`` directory
    and shows four sections: basic stats, AI-field coverage, daily processing
    trend, and per-delegation-number field completeness.

    Requires ``st.session_state['ai_fields']`` (list of AI-recognized column
    names) and ``st.session_state['config_params']['email_folder_version']``
    to be populated by earlier pages.
    """

    def __init__(self):
        # Fail fast if the prerequisite page has not populated the field list.
        if 'ai_fields' not in st.session_state:
            raise ValueError("ai_fields not found in session state")

    def render(self):
        """Render the full analysis page, or a warning if the result file is absent/empty."""
        st.markdown("<h1 class='main-header'>结果分析</h1>", unsafe_allow_html=True)
        current_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        result_path = os.path.join(current_dir, "target")
        # The result workbook is versioned by the configured email folder version.
        result_file = os.path.join(
            result_path,
            f"Email_Aidata_{st.session_state['config_params']['email_folder_version']}.xlsx"
        )

        if not os.path.exists(result_file):
            st.warning("⚠️ 未找到结果文件")
            return

        try:
            # header=1: the first spreadsheet row is decorative; real column
            # names are on the second row.
            df = pd.read_excel(result_file, header=1)
            if len(df) == 0:
                st.warning("结果文件为空，暂无数据可分析")
                return

            # Basic statistics
            self.render_basic_stats(df)
            st.divider()
            # AI-field coverage analysis
            self.render_field_coverage_analysis(df)
            st.divider()
            # Processing-time trend analysis
            self.render_time_trend_analysis(df)
            st.divider()
            # Delegation-number analysis
            self.render_delegation_analysis(df)

        except Exception as e:
            st.error(f"❌ 读取结果文件失败: {str(e)}")
            st.exception(e)

    def render_basic_stats(self, df):
        """Render headline metrics: total rows, rows updated today, AI-field coverage.

        Args:
            df: result DataFrame loaded from the Excel file (not modified).
        """
        st.subheader("基础统计")

        total_records = len(df)

        # Rows whose '数据更新时间' timestamp falls on today's date.
        today_processed = 0
        if '数据更新时间' in df.columns:
            today = datetime.now().date()
            try:
                # Explicit format avoids pandas' dateutil-inference warning.
                parsed = pd.to_datetime(df['数据更新时间'],
                                        format='%Y-%m-%d %H:%M:%S',
                                        errors='coerce')
                today_processed = int((parsed.dt.date == today).sum())
            except Exception:
                # BUGFIX: the original had two identical `except Exception`
                # clauses on this try, so its warning branch was unreachable
                # and a failure of the fallback parse went uncaught.
                # Fall back to format inference (warnings silenced); only
                # surface a warning if that fails as well.
                try:
                    import warnings
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        parsed = pd.to_datetime(df['数据更新时间'], errors='coerce')
                        today_processed = int((parsed.dt.date == today).sum())
                except Exception as e:
                    st.warning(f"处理数据更新时间时出错: {str(e)}")

        # Coverage = non-null cells / total cells across AI fields present in df.
        coverage_rate = 0
        if total_records > 0:
            ai_fields_in_df = [field for field in st.session_state['ai_fields']
                               if field in df.columns]
            if ai_fields_in_df:
                non_empty_count = sum(int(df[field].notna().sum())
                                      for field in ai_fields_in_df)
                # Each field contributes len(df) cells, so the denominator is
                # simply rows * fields (the original summed len(df) in a loop).
                total_count = total_records * len(ai_fields_in_df)
                coverage_rate = non_empty_count / total_count * 100

        col1, col2, col3 = st.columns(3)

        with col1:
            st.metric(
                label="总记录数",
                value=total_records,
                help="唯一委托单号数"
            )

        with col2:
            st.metric(
                label="今日处理量",
                value=today_processed,
                help="今日更新的委托记录行数"
            )

        with col3:
            st.metric(
                label="覆盖率",
                value=f"{coverage_rate:.1f}%",
                help="AI字段识别覆盖率：(识别字段的行数/所有行数的百分比)*100"
            )

    def render_field_coverage_analysis(self, df):
        """Render per-field recognition coverage as a table, a bar chart and a donut chart.

        A cell counts as "recognized" only if it is non-null AND not a
        whitespace-only string.
        """
        st.subheader("🎯 AI识别字段覆盖率分析")

        total_count = len(df)
        coverage_data = []
        ai_fields_in_df = [field for field in st.session_state['ai_fields']
                           if field in df.columns]
        for field in ai_fields_in_df:
            # Exclude NaN and empty/whitespace-only strings in one pass.
            # (The original first computed a plain notna() count and then
            # immediately overwrote it — that dead line is removed.)
            non_empty_count = df[field].apply(
                lambda x: pd.notna(x) and str(x).strip() != ''
            ).sum()
            coverage_rate = (non_empty_count / total_count * 100) if total_count > 0 else 0

            coverage_data.append({
                '字段名称': field,
                '覆盖率': coverage_rate,
                '识别数量': non_empty_count,
                '总数量': total_count
            })

        if coverage_data:
            coverage_df = pd.DataFrame(coverage_data)

            # Coverage table with percentage formatting.
            st.dataframe(
                coverage_df.style.format({
                    '覆盖率': '{:.2f}%'
                }),
                use_container_width=True
            )

            col1, col2 = st.columns(2)

            with col1:
                # Per-field coverage, colored red→green by rate.
                fig_bar = px.bar(
                    coverage_df,
                    x='字段名称',
                    y='覆盖率',
                    title="各字段识别覆盖率",
                    color='覆盖率',
                    color_continuous_scale='RdYlGn',
                    range_color=[0, 100]
                )
                fig_bar.update_xaxes(tickangle=45)
                fig_bar.update_layout(height=400)
                st.plotly_chart(fig_bar, use_container_width=True)

            with col2:
                # Donut of the mean coverage across all fields.
                avg_coverage = coverage_df['覆盖率'].mean()
                fig_pie = go.Figure(data=[go.Pie(
                    labels=['已识别', '未识别'],
                    values=[avg_coverage, 100 - avg_coverage],
                    hole=.4,
                    marker_colors=['#2ecc71', '#e74c3c']
                )])
                fig_pie.update_layout(
                    title_text="整体字段识别覆盖率",
                    annotations=[dict(text=f"平均覆盖率<br>{avg_coverage:.1f}%",
                                    x=0.5, y=0.5, font_size=15, showarrow=False)],
                    height=400
                )
                st.plotly_chart(fig_pie, use_container_width=True)
        else:
            st.info("📝 暂无AI字段数据可分析")

    def render_time_trend_analysis(self, df):
        """Render a daily processing-volume line chart from the first available time column.

        Does not mutate the caller's DataFrame (the original reassigned the
        parsed time column into ``df`` in place).
        """
        st.subheader("📈 处理时间趋势分析")

        # Use the first time-like column that exists in the data.
        time_field = next(
            (field for field in ('处理时间', '创建时间', '时间戳') if field in df.columns),
            None
        )

        if time_field and not df[time_field].isna().all():
            try:
                # Parse timestamps on a copy so the caller's df is untouched;
                # unparseable values become NaT and are dropped.
                parsed = pd.to_datetime(df[time_field], errors='coerce')
                df_with_time = (
                    df.assign(**{time_field: parsed})
                      .dropna(subset=[time_field])
                )

                if len(df_with_time) > 0:
                    # Group by calendar date and count rows per day.
                    df_with_time['日期'] = df_with_time[time_field].dt.date
                    daily_stats = df_with_time.groupby('日期').size().reset_index(name='处理数量')
                    daily_stats['日期'] = pd.to_datetime(daily_stats['日期'])

                    fig_trend = px.line(
                        daily_stats,
                        x='日期',
                        y='处理数量',
                        title="每日邮件处理数量趋势",
                        markers=True
                    )
                    fig_trend.update_layout(height=400)
                    st.plotly_chart(fig_trend, use_container_width=True)

                    st.dataframe(daily_stats, use_container_width=True)
                else:
                    st.info("📅 暂无有效的时间数据")
            except Exception as e:
                st.warning(f"⚠️ 时间数据处理失败: {str(e)}")
        else:
            st.info("📅 未找到时间字段")

    def render_delegation_analysis(self, df):
        """Render field-completeness analysis per delegation number (委托单号).

        For each unique delegation number, counts how many AI fields have at
        least one non-empty value among its rows, then charts the top 10.
        """
        st.subheader("📋 委托单号分析")

        if '委托单号' not in df.columns:
            st.warning("⚠️ 数据中未找到委托单号字段")
            return

        ai_fields = st.session_state['ai_fields']
        # Only fields actually present in the data can be counted; hoisted out
        # of the per-delegation loop.
        fields_in_df = [field for field in ai_fields if field in df.columns]

        wt_completeness = []
        for wt_no in df['委托单号'].dropna().unique():
            wt_data = df[df['委托单号'] == wt_no]
            # A field is "complete" if any row of this delegation has a
            # non-null, non-whitespace value for it.
            field_count = sum(
                1 for field in fields_in_df
                if (wt_data[field].notna()
                    & (wt_data[field].astype(str).str.strip() != '')).any()
            )

            wt_completeness.append({
                '委托单号': wt_no,
                '处理次数': len(wt_data),
                '字段完整度': field_count,
                '完整度百分比': round((field_count / len(ai_fields)) * 100, 1)
            })

        if wt_completeness:
            wt_df = pd.DataFrame(wt_completeness)
            # Top 10 delegations by number of complete fields.
            top_complete_wt = wt_df.nlargest(10, '字段完整度')

            fig_wt = px.bar(
                top_complete_wt,
                x='委托单号',
                y='字段完整度',
                title="字段最全的委托单号 (Top 10)",
                labels={'字段完整度': '完整字段数量'},
                hover_data=['处理次数', '完整度百分比']
            )
            fig_wt.update_xaxes(tickangle=45)
            st.plotly_chart(fig_wt, use_container_width=True)

            st.subheader("委托单号字段完整度详情")
            st.dataframe(top_complete_wt, use_container_width=True)
        else:
            st.info("📝 暂无委托单号数据可分析")