"""
Streamlit应用 - 查看实验结果数据库
用于验证数据存储是否正确
"""
import sys
import os
from pathlib import Path

# 添加项目根目录到Python路径
project_root = Path(__file__).parent.parent.parent
if str(project_root) not in sys.path:
    sys.path.insert(0, str(project_root))

import streamlit as st
import pandas as pd
import json
from sqlalchemy.orm import Session
from typing import List

from param_management_client.backend.db import get_db, SessionLocal
from param_management_client.backend.db.parameters.models import Project
from param_management_client.backend.db.experiments.models import (
    ExperimentRun,
    ExperimentDataCategory,
    ExperimentDataScalar,
    ExperimentDataSeries,
    ExperimentResult
)
from param_management_client.backend.db.experiments import crud
from param_management_client.backend.config import EXPERIMENT_RESULTS_ROOT

# Optional-dependency probe: pyarrow is only required to preview series data
# stored as Parquet files; the rest of the viewer works without it.
try:
    import pyarrow.parquet as pq  # imported only to detect availability; reads use pd.read_parquet
    HAS_PYARROW = True
except ImportError:
    HAS_PYARROW = False
    # NOTE(review): this st.warning() executes at import time, before the
    # st.set_page_config() call further down; Streamlit requires
    # set_page_config to be the first Streamlit command, so this path may
    # raise when pyarrow is missing — confirm and consider deferring the
    # warning until after page configuration.
    st.warning("⚠️ 未安装pyarrow，无法查看Parquet序列数据")


def load_experiments_for_project(db: Session, project_id: int) -> List[ExperimentRun]:
    """Fetch the experiment runs recorded for *project_id* (capped at 1000)."""
    runs = crud.get_experiment_runs_by_project(db, project_id, skip=0, limit=1000)
    return runs


def get_project_list(db: Session) -> List[Project]:
    """Return every project, ordered alphabetically by its English name."""
    query = db.query(Project).order_by(Project.name_en)
    return query.all()


def format_datetime(dt):
    """Render *dt* as ``YYYY-MM-DD HH:MM:SS``; falsy values become ``"N/A"``."""
    return dt.strftime("%Y-%m-%d %H:%M:%S") if dt else "N/A"


def load_parquet_data(file_path: Path):
    """Read a Parquet file into a DataFrame.

    Returns None when pyarrow is unavailable or the file does not exist;
    read failures are surfaced via st.error and also yield None.
    """
    if not HAS_PYARROW:
        return None
    try:
        if not file_path.exists():
            return None
        return pd.read_parquet(file_path)
    except Exception as e:
        st.error(f"读取Parquet文件失败: {e}")
        return None


# --- Page setup -------------------------------------------------------------
st.set_page_config(
    page_title="实验结果数据库查看器",
    page_icon="🔬",
    layout="wide"
)

st.title("🔬 实验结果数据库查看器")
st.markdown("---")

# Sidebar: project selection
st.sidebar.header("📋 项目选择")

# Acquire a database session; released in the finally block at the bottom.
db = next(get_db())

try:
    # Load all projects — nothing to display without at least one.
    projects = get_project_list(db)

    if not projects:
        st.warning("数据库中暂无项目")
        st.stop()

    # Project selector: map display label -> project id for lookup.
    project_options = {f"{p.name} ({p.name_en})": p.id for p in projects}
    selected_project_name = st.sidebar.selectbox(
        "选择项目",
        options=list(project_options.keys()),
        index=0
    )
    selected_project_id = project_options[selected_project_name]
    selected_project = next(p for p in projects if p.id == selected_project_id)

    # Basic project info in the sidebar.
    st.sidebar.markdown("### 项目信息")
    st.sidebar.write(f"**名称**: {selected_project.name}")
    st.sidebar.write(f"**英文名**: {selected_project.name_en}")

    # Main area: experiments belonging to the selected project.
    st.header(f"📊 实验列表 - {selected_project.name}")

    experiments = load_experiments_for_project(db, selected_project_id)

    if not experiments:
        st.info("该项目暂无实验数据")
        st.stop()

    # Experiment selector: map display label -> experiment id.
    experiment_options = {
        f"#{exp.id}: {exp.name or '未命名'} ({format_datetime(exp.created_at)})": exp.id
        for exp in experiments
    }
    selected_experiment_name = st.selectbox(
        "选择实验",
        options=list(experiment_options.keys()),
        index=0
    )
    selected_experiment_id = experiment_options[selected_experiment_name]
    selected_experiment = next(exp for exp in experiments if exp.id == selected_experiment_id)

    st.markdown("---")

    # Basic info about the selected experiment run.
    st.subheader("📝 实验基本信息")
    col1, col2, col3, col4 = st.columns(4)
    with col1:
        st.metric("实验ID", selected_experiment.id)
    with col2:
        st.metric("项目ID", selected_experiment.project_id)
    with col3:
        st.metric("创建时间", format_datetime(selected_experiment.created_at))
    with col4:
        result_count = len(selected_experiment.results)
        category_count = len(selected_experiment.categories)
        st.metric("数据项", f"{result_count}文件/{category_count}分类")

    if selected_experiment.name:
        st.write(f"**实验名称**: {selected_experiment.name}")
    if selected_experiment.description:
        st.write(f"**描述**: {selected_experiment.description}")
    if selected_experiment.snapshot_file_path:
        st.write(f"**快照文件**: {selected_experiment.snapshot_file_path}")

    st.markdown("---")

    # Data categories attached to the experiment.
    st.subheader("📁 数据分类")
    categories = selected_experiment.categories
    if categories:
        category_df = pd.DataFrame([
            {
                "ID": cat.id,
                "名称": cat.name,
                "描述": cat.description or "N/A",
                "排序": cat.sort_order,
                "创建时间": format_datetime(cat.created_at)
            }
            for cat in sorted(categories, key=lambda x: x.sort_order)
        ])
        st.dataframe(category_df, use_container_width=True)
    else:
        st.info("暂无分类数据")

    st.markdown("---")

    # Scalar (single-value) data rows.
    st.subheader("🔢 单值数据")
    scalar_data = db.query(ExperimentDataScalar).filter(
        ExperimentDataScalar.experiment_run_id == selected_experiment_id
    ).all()

    if scalar_data:
        scalar_rows = []
        for scalar in scalar_data:
            # Values are stored as JSON text; fall back to the raw value when
            # the payload is not valid JSON (or not a string at all).
            try:
                value = json.loads(scalar.value) if scalar.value else None
            except (TypeError, ValueError):
                value = scalar.value

            category_name = scalar.category.name if scalar.category else "N/A"
            scalar_rows.append({
                "ID": scalar.id,
                "分类": category_name,
                "名称": scalar.name,
                "值": str(value),
                "单位": scalar.unit or "N/A",
                "数据分组": scalar.data_group or "N/A",
                "描述": scalar.description or "N/A",
                "创建时间": format_datetime(scalar.created_at)
            })

        scalar_df = pd.DataFrame(scalar_rows)
        st.dataframe(scalar_df, use_container_width=True)
    else:
        st.info("暂无单值数据")

    st.markdown("---")

    # Series data rows (backed by Parquet files on disk).
    st.subheader("📈 序列数据")
    series_data = db.query(ExperimentDataSeries).filter(
        ExperimentDataSeries.experiment_run_id == selected_experiment_id
    ).all()

    if series_data:
        # Overview table of all series.
        series_rows = []
        for series in series_data:
            category_name = series.category.name if series.category else "N/A"
            series_rows.append({
                "ID": series.id,
                "分类": category_name,
                "名称": series.name,
                "数据长度": series.data_length,
                "单位": series.unit or "N/A",
                "数据分组": series.data_group or "N/A",
                "索引列名": series.index_column_name or "index",
                "Parquet路径": series.parquet_path,
                "创建时间": format_datetime(series.created_at)
            })

        series_list_df = pd.DataFrame(series_rows)
        st.dataframe(series_list_df, use_container_width=True)

        # Drill-down: inspect one series' Parquet content.
        st.markdown("#### 查看序列数据内容")
        series_options = {
            f"{s.category.name if s.category else 'N/A'} / {s.name}": s.id
            for s in series_data
        }
        selected_series_name = st.selectbox(
            "选择序列数据",
            options=list(series_options.keys()),
            index=0
        )
        selected_series_id = series_options[selected_series_name]
        selected_series = next(s for s in series_data if s.id == selected_series_id)

        # Parquet paths are stored relative to the results root.
        parquet_path = Path(EXPERIMENT_RESULTS_ROOT) / selected_series.parquet_path
        df = load_parquet_data(parquet_path)

        if df is not None:
            st.write(f"**文件路径**: {parquet_path}")
            st.write(f"**数据形状**: {df.shape}")
            st.dataframe(df, use_container_width=True)

            # With exactly two columns (index + value), try a line chart.
            if len(df.columns) == 2:
                try:
                    index_col, value_col = df.columns
                    st.line_chart(df.set_index(index_col)[value_col])
                except Exception:
                    # Charting is best-effort; the table above is authoritative.
                    pass
        else:
            st.warning(f"无法加载Parquet文件: {parquet_path}")
            if not parquet_path.exists():
                st.error(f"文件不存在: {parquet_path}")
    else:
        st.info("暂无序列数据")

    st.markdown("---")

    # Result files registered for the experiment.
    st.subheader("📎 结果文件")
    result_files = selected_experiment.results

    if result_files:
        file_rows = []
        for result in result_files:
            file_rows.append({
                "ID": result.id,
                "名称": result.name,
                "文件类型": result.file_type,
                "文件路径": result.file_path,
                "文件大小": f"{result.file_size:,} bytes" if result.file_size else "N/A",
                "描述": result.description or "N/A",
                "创建时间": format_datetime(result.created_at)
            })

        file_df = pd.DataFrame(file_rows)
        st.dataframe(file_df, use_container_width=True)

        # Verify that each registered file actually exists on disk.
        st.markdown("#### 文件存在性检查")
        file_check_rows = []
        for result in result_files:
            file_path = Path(EXPERIMENT_RESULTS_ROOT) / result.file_path
            exists = file_path.exists()
            file_check_rows.append({
                "文件名称": result.name,
                "路径": result.file_path,
                "存在": "✅" if exists else "❌",
                "大小": f"{file_path.stat().st_size:,} bytes" if exists else "N/A"
            })

        file_check_df = pd.DataFrame(file_check_rows)
        st.dataframe(file_check_df, use_container_width=True)
    else:
        st.info("暂无结果文件")

    st.markdown("---")

    # Summary statistics.
    st.subheader("📊 统计信息")
    col1, col2, col3, col4, col5 = st.columns(5)
    with col1:
        st.metric("分类数", len(categories))
    with col2:
        st.metric("单值数据", len(scalar_data))
    with col3:
        st.metric("序列数据", len(series_data))
    with col4:
        st.metric("结果文件", len(result_files))
    with col5:
        # Sum only rows with a recorded size; None sizes are skipped.
        total_size = sum(f.file_size for f in result_files if f.file_size) if result_files else 0
        st.metric("总文件大小", f"{total_size:,} bytes" if total_size else "0")

finally:
    # Always return the session to the pool, even when st.stop() was hit.
    db.close()

