import os
import json
import pandas as pd
import streamlit as st
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import requests
from json_repair import repair_json
import PyPDF2
import uuid
import re
from datetime import datetime
from openai import OpenAI
import ast
import logging
from logging.handlers import RotatingFileHandler

# Initialize logging
def setup_logging():
    """Configure and return the application logger.

    Creates the ``log`` directory if needed and attaches a rotating file
    handler (1 MB per file, 5 backups) plus a console handler to a named
    logger.  Idempotent: repeated calls return the same configured logger
    without adding duplicate handlers.

    :return: the configured ``logging.Logger`` named "test_case_generator"
    """
    # Create log directory if it doesn't exist
    os.makedirs("log", exist_ok=True)

    # Use a named logger rather than the root logger so this app's
    # configuration does not interfere with other libraries' logging.
    logger = logging.getLogger("test_case_generator")
    logger.setLevel(logging.INFO)

    # Already configured on a previous call: return the SAME named logger.
    # (The previous `_called` attribute guard returned the root logger on
    # repeat calls, which was inconsistent with the first call's return.)
    if logger.handlers:
        return logger

    # Create formatter
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # Rotating file handler keeps the log bounded in size on disk.
    file_handler = RotatingFileHandler(
        'log/app.log', 
        maxBytes=1024*1024,  # 1MB
        backupCount=5
    )
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    # Also log to console
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)
    
    return logger

# Initialize the shared application logger.
logger = setup_logging()

# API credentials: prefer the environment variable so the key does not have
# to live in source control.
# SECURITY NOTE(review): the hard-coded fallback key below is committed to
# the repository -- it should be rotated and removed once the environment
# variable is provisioned everywhere.
Client = OpenAI(
    api_key=os.environ.get(
        "SILICONFLOW_API_KEY",
        'sk-rlxhkzkkzhbpbfpqnflehcbkrnntaexllvywwhpixoithsrb',
    ),
    base_url='https://api.siliconflow.cn/v1'
)
# Maps the human-readable system names shown in the UI selectbox to the
# short system codes used for on-disk data directories and file names
# (e.g. data/<code>/<code>_test_cases.csv).
SYSTEM_MAPPING = {
    "集中化计划建设管理系统": "pms",
    "集中化报账服务平台": "bz",
    "集中化人力资源管理系统":"ihr",
    "云化统一信息平台（门户业务）":"ymh",
    "云化统一信息平台（综合业务）":"yzh",
    "云化统一信息平台（公文业务）":"ygw",
    "全国统一邮件系统":"ty",
    "集中化研发管理系统":"yf",
    "PMS-IT公司计划建设协同管理":"pmsxt",
    "集中化预算管理系统":"ys",
    "集中化司库管理系统":"sk",
    "集中化合同管理系统":"ht",
    "集中化供应链管理系统":"gyl",
    "集中化成本管理系统":"cb",
    "集中化资产管理系统":"zc"
}
def load_cases(system_code):
    """Load stored test cases for *system_code* from its CSV file.

    The CSV holds two columns: ``需求描述`` (requirement description) and
    ``测试用例`` (test cases, stored as a Python literal).  A missing file
    is created with a header row; rows whose test-case cell cannot be
    parsed degrade to an empty list instead of failing the whole load.

    :param system_code: short system code used for the data directory
    :return: DataFrame with columns ['需求描述', '测试用例'] (possibly empty)
    """
    logger.info(f"Loading test cases for system: {system_code}")
    # Build paths with os.path for portability.
    dir_path = os.path.join("data", system_code)
    csv_path = os.path.join(dir_path, f"{system_code}_test_cases.csv")

    try:
        # Ensure the data directory exists.
        os.makedirs(dir_path, exist_ok=True)

        # Create and initialize the file with a header row when missing.
        if not os.path.exists(csv_path):
            logger.info(f"Test cases file not found, creating new at: {csv_path}")
            with open(csv_path, 'w', encoding='utf-8') as f:
                f.write("需求描述,测试用例\n")
            return pd.DataFrame(columns=['需求描述', '测试用例'])
        logger.info(f"Loading test cases from: {csv_path}")
        # Read the existing file.
        df = pd.read_csv(csv_path)

        # Validate that the expected columns are present.
        required_columns = ['需求描述', '测试用例']
        if not all(col in df.columns for col in required_columns):
            logger.warning(f"CSV file missing required columns (needed {required_columns})")
            st.warning(f"CSV文件缺少必要的列（需要{required_columns}）。请检查文件格式。")
            return pd.DataFrame(columns=required_columns)

        def safe_parse_json(json_str):
            """Parse one stored cell into a Python object; [] on failure."""
            if pd.isna(json_str):
                return []
            try:
                # Ensure the input is a string.
                json_str = str(json_str).strip()
                if not json_str:
                    return []
                # Cells are written as Python literals (single-quoted dicts),
                # so ast.literal_eval is used instead of json.loads.
                return ast.literal_eval(json_str)
            except (ValueError, SyntaxError) as e:
                # BUG FIX: ast.literal_eval raises ValueError/SyntaxError,
                # never json.JSONDecodeError as was previously caught, so
                # parse failures now hit this handler as intended.
                logger.warning(f"JSON parse error: {e}, data: {json_str[:100]}...")
                st.warning(f"JSON解析错误: {e}, 数据: {json_str[:100]}...")
                return []
            except Exception as e:
                logger.error(f"Unexpected error processing test cases: {e}")
                st.warning(f"处理测试用例时发生意外错误: {e}")
                return []

        df['测试用例'] = df['测试用例'].apply(safe_parse_json)
        logger.info(f"Successfully loaded {len(df)} test cases")
        return df

    except pd.errors.EmptyDataError:
        logger.warning("CSV file is empty, reinitializing")
        st.warning("CSV文件为空，已重新初始化。")
        return pd.DataFrame(columns=['需求描述', '测试用例'])
    except PermissionError as e:
        logger.error(f"Permission error: {e}. Cannot access file {csv_path}")
        st.error(f"权限错误: {e}。无法访问文件 {csv_path}。")
        return pd.DataFrame(columns=['需求描述', '测试用例'])
    except Exception as e:
        logger.error(f"Failed to load CSV file: {str(e)}")
        st.error(f"加载CSV文件失败: {str(e)}")
        return pd.DataFrame(columns=['需求描述', '测试用例'])

def load_knowledge_segments(system_code):
    """Load the knowledge-segment CSV for *system_code*.

    Returns a DataFrame with columns ``segment_id``, ``document_name``,
    ``page_num`` and ``content``.  An empty DataFrame with those columns
    is returned when the file is missing, empty, malformed or unreadable
    (the file is never created by this function).

    :param system_code: short system code used for the data directory
    :return: DataFrame of knowledge segments (possibly empty)
    """
    logger.info(f"Loading knowledge segments for system: {system_code}")
    dir_path = os.path.join("data", system_code)  # stored under the data directory
    csv_path = os.path.join(dir_path, f"{system_code}_knowledge_segments.csv")

    required_columns = ['segment_id', 'document_name', 'page_num', 'content']

    try:
        # Make sure the directory hierarchy exists.
        os.makedirs(dir_path, exist_ok=True)

        # Missing file: hand back an empty frame without creating anything.
        if not os.path.exists(csv_path):
            logger.info(f"Knowledge segments file not found at: {csv_path}")
            return pd.DataFrame(columns=required_columns)

        logger.info(f"Loading knowledge segments from: {csv_path}")
        df = pd.read_csv(csv_path)

        # Reject files lacking any of the expected columns.
        missing = [col for col in required_columns if col not in df.columns]
        if missing:
            logger.warning(f"CSV file missing required columns, returning empty DataFrame. Needed columns: {required_columns}")
            st.warning(f"CSV文件缺少必要的列，将返回空DataFrame。需要的列: {required_columns}")
            return pd.DataFrame(columns=required_columns)

        logger.info(f"Successfully loaded {len(df)} knowledge segments")
        return df

    except pd.errors.EmptyDataError:
        logger.warning("CSV file is empty, returning empty DataFrame")
        st.warning("CSV文件为空，将返回空DataFrame。")
        return pd.DataFrame(columns=required_columns)
    except PermissionError as e:
        logger.error(f"Permission error: {e}. Cannot access file {csv_path}")
        st.error(f"权限错误: {e}。无法访问文件 {csv_path}。")
        return pd.DataFrame(columns=required_columns)
    except Exception as e:
        logger.error(f"Failed to load knowledge base: {str(e)}")
        st.error(f"加载知识库失败: {str(e)}")
        return pd.DataFrame(columns=required_columns)
    
def process_pdf(uploaded_file):
    """Extract text segments from an uploaded PDF file.

    The upload is written to a uniquely-named temp file, read page by page,
    and split on blank lines into paragraphs; paragraphs longer than 20
    characters are kept as knowledge segments.  The temp file is always
    removed afterwards.

    :param uploaded_file: Streamlit UploadedFile-like object (PDF)
    :return: (segments, segment_count, total_pages); ([], 0, 0) on failure
    """
    logger.info(f"Processing PDF file: {uploaded_file.name}")
    # Unique temp filename (timestamp + random suffix) avoids collisions
    # between concurrent or repeated uploads.
    filename = f"{datetime.now().strftime('%Y%m%d%H%M%S')}_{uuid.uuid4().hex[:8]}.pdf"
    filepath = os.path.join("temp", filename)
    
    os.makedirs("temp", exist_ok=True)
    
    with open(filepath, "wb") as f:
        f.write(uploaded_file.getbuffer())
    
    segments = []
    try:
        logger.info(f"Reading PDF file: {filepath}")
        pdf_reader = PyPDF2.PdfReader(filepath)
        total_pages = len(pdf_reader.pages)
        logger.info(f"PDF has {total_pages} pages")

        for page_num in range(total_pages):
            page = pdf_reader.pages[page_num]
            text = page.extract_text()
            
            # Blank lines approximate paragraph boundaries.
            paragraphs = re.split(r'\n\s*\n', text)
            for i, paragraph in enumerate(paragraphs):
                paragraph = paragraph.strip()
                if len(paragraph) > 20:  # skip trivially short fragments
                    segments.append({
                        # BUG FIX: prefix the id with the unique temp
                        # filename; the previous constant prefix made ids
                        # collide across different uploaded documents.
                        'segment_id': f"{filename}_{page_num}_{i}",
                        'document_name': uploaded_file.name,
                        'page_num': page_num + 1,
                        'content': paragraph
                    })
        logger.info(f"Extracted {len(segments)} segments from PDF")
        return segments, len(segments), total_pages
    except Exception as e:
        logger.error(f"PDF processing failed: {str(e)}")
        st.error(f"PDF处理失败：{str(e)}")
        return [], 0, 0
    finally:
        # BUG FIX: the temp file was previously removed only on failure,
        # leaking one file per successful upload. Clean up unconditionally.
        if os.path.exists(filepath):
            os.remove(filepath)


def save_knowledge_segments(segments, system_code, csv_path="knowledge_segments.csv"):
    """Append *segments* to the system's knowledge-segment CSV.

    Existing rows are preserved; the new rows are concatenated after them.
    An empty existing file is treated the same as a missing one.

    :param segments: list of segment dicts (one CSV row each)
    :param system_code: short system code used for the data directory
    :param csv_path: file-name suffix for the CSV (prefixed with the code)
    :return: total number of rows in the file after saving
    :raises PermissionError: when the target file is not writable
    :raises RuntimeError: on any other save failure
    """
    logger.info(f"Saving {len(segments)} knowledge segments for system: {system_code}")
    dir_path = os.path.join("data", system_code)  # stored under the data directory
    total_csv_path = os.path.join(dir_path, f"{system_code}_{csv_path}")

    try:
        # Create the directory hierarchy on demand.
        os.makedirs(dir_path, exist_ok=True)

        combined = pd.DataFrame(segments)

        # Merge with any pre-existing rows.
        if os.path.exists(total_csv_path):
            try:
                logger.info(f"Merging with existing knowledge segments at: {total_csv_path}")
                previous = pd.read_csv(total_csv_path)
                combined = pd.concat([previous, combined], ignore_index=True)
            except pd.errors.EmptyDataError:
                logger.warning("Existing knowledge segments file is empty, using new data only")

        combined.to_csv(total_csv_path, index=False)
        logger.info(f"Successfully saved {len(combined)} knowledge segments")
        return len(combined)

    except PermissionError as e:
        logger.error(f"Permission denied writing file: {total_csv_path}")
        raise PermissionError(f"无权限写入文件: {total_csv_path}") from e
    except Exception as e:
        logger.error(f"Failed to save knowledge segments: {str(e)}")
        raise RuntimeError(f"保存知识片段失败: {str(e)}") from e
    
def save_test_cases(system_code, requirement, test_cases):
    """Persist generated test cases to the system's CSV file.

    Each case is stored as one row together with the requirement text that
    produced it; existing rows are kept and the new ones appended.

    :param system_code: short system code used for the data directory
    :param requirement: requirement description the cases were generated for
    :param test_cases: list of generated test cases
    :return: total number of stored cases after saving
    :raises PermissionError: when the target file is not writable
    :raises RuntimeError: on any other save failure
    """
    logger.info(f"Saving {len(test_cases)} test cases for system: {system_code}")
    # Build a safe, platform-independent file path.
    dir_path = os.path.join("data", system_code)
    csv_path = os.path.join(dir_path, f"{system_code}_test_cases.csv")

    try:
        os.makedirs(dir_path, exist_ok=True)

        # One row per test case, paired with the originating requirement.
        df_new = pd.DataFrame(
            [{'需求描述': requirement, '测试用例': case} for case in test_cases]
        )

        combined = df_new
        if os.path.exists(csv_path):
            try:
                logger.info(f"Merging with existing test cases at: {csv_path}")
                existing = pd.read_csv(csv_path)
                combined = pd.concat([existing, df_new], ignore_index=True)
            except pd.errors.EmptyDataError:
                pass  # empty file: keep just the new rows

        combined.to_csv(csv_path, index=False, encoding='utf-8')
        logger.info(f"Successfully saved {len(combined)} test cases")
        return len(combined)

    except PermissionError as e:
        logger.error(f"Permission denied writing file: {csv_path}")
        raise PermissionError(f"无权限写入文件: {csv_path}") from e
    except Exception as e:
        logger.error(f"Failed to save test cases: {str(e)}")
        raise RuntimeError(f"保存测试用例失败: {str(e)}") from e
    
def find_similar_cases(new_req, df, top_k=3):
    """Recommend historical test cases by text similarity.

    Computes TF-IDF cosine similarity between *new_req* and every stored
    requirement description and returns the test cases of the *top_k* most
    similar rows, most similar first.
    """
    logger.info(f"Finding similar cases for new requirement (top_k={top_k})")
    if df.empty:
        logger.info("No existing cases to compare with")
        return []
    # Fit on the historical requirements plus the new one, then compare the
    # new requirement (last matrix row) against all historical rows.
    corpus = df['需求描述'].tolist() + [new_req]
    matrix = TfidfVectorizer().fit_transform(corpus)
    scores = cosine_similarity(matrix[-1], matrix[:-1])
    best = scores.argsort()[0][-top_k:][::-1]
    logger.info(f"Found {len(best)} similar cases")
    return [df.iloc[idx]['测试用例'] for idx in best]

def find_relevant_knowledge(query, knowledge_df, top_k=2):
    """Retrieve the *top_k* knowledge segments most relevant to *query*.

    Uses TF-IDF cosine similarity over segment contents; each hit is
    returned as a dict with ``document``, ``page`` and ``content`` keys,
    most relevant first.  Returns [] on an empty knowledge base or error.
    """
    logger.info(f"Finding relevant knowledge for query (top_k={top_k})")
    if knowledge_df.empty:
        logger.info("Knowledge base is empty")
        return []

    contents = knowledge_df['content'].tolist()
    try:
        # The query is appended as the final document so its matrix row can
        # be compared against every stored segment.
        matrix = TfidfVectorizer().fit_transform(contents + [query])
        scores = cosine_similarity(matrix[-1], matrix[:-1])
        ranked = scores.argsort()[0][-top_k:][::-1]

        results = [
            {
                'document': knowledge_df.iloc[idx]['document_name'],
                'page': knowledge_df.iloc[idx]['page_num'],
                'content': knowledge_df.iloc[idx]['content'],
            }
            for idx in ranked
        ]
        logger.info(f"Found {len(results)} relevant knowledge segments")
        return results
    except Exception as e:
        logger.error(f"Knowledge search failed: {str(e)}")
        st.error(f"知识搜索失败：{str(e)}")
        return []

def generate_test_cases(prompt, history_cases=None, knowledge_segments=None, max_cases=10, temp=0.7, use_enhancement=True):
    """Generate test cases for *prompt* via the DeepSeek chat-completion API.

    :param prompt: the user's requirement description (sent as user message)
    :param history_cases: similar historical cases embedded into the system
        prompt as few-shot context (only used when *use_enhancement* is True)
    :param knowledge_segments: relevant knowledge dicts with ``document``,
        ``page`` and ``content`` keys (only used when *use_enhancement* is True)
    :param max_cases: number of cases the model is asked to produce
    :param temp: sampling temperature passed to the API
    :param use_enhancement: when True and any context is available, the
        system prompt embeds the history cases and knowledge segments
    :return: parsed JSON structure of generated cases, or [] on any failure
    """
    logger.info(f"Generating test cases (max_cases={max_cases}, temp={temp}, enhancement={use_enhancement})")
    system_prompt = ""
    
    # Enhanced mode: fold the retrieved historical cases and knowledge
    # snippets into the system prompt as context.
    if use_enhancement and (history_cases or knowledge_segments):
        context = "\n".join([f"历史用例{idx+1}: {case}" for idx, case in enumerate(history_cases or [])]) if history_cases else ""
        
        knowledge_context = ""
        if knowledge_segments and len(knowledge_segments) > 0:
            knowledge_context = "参考知识：\n" + "\n\n".join([
                f"文档《{item['document']}》第{item['page']}页：{item['content']}"
                for item in knowledge_segments
            ])
        
        system_prompt = f"""你是一名测试老司机，请基于以下历史用例和知识库生成{max_cases}条新用例：
{context}

{knowledge_context}

要求：
1. 输出格式为JSON数组，每个对象包含字段：
   - 用例编号（格式TC-模块-序号，如TC-LOGIN-01）
   - 步骤（简明步骤描述）
   - 预期（预期结果）
   - 优先级（1-5，1为最高）
2. 包含正向和异常场景
3. 按优先级从高到低排序
4. 仅返回合法JSON，不要额外解释"""
    else:
        # Plain mode: no retrieved context, only the formatting contract.
        system_prompt = f"""你是一名资深测试工程师，请为以下需求生成{max_cases}条测试用例：

要求：
1. 输出格式为JSON数组，每个对象包含字段：
   - 用例编号（格式TC-模块-序号，如TC-LOGIN-01）
   - 步骤（简明步骤描述）
   - 预期（预期结果）
   - 优先级（1-5，1为最高）
2. 包含正向和异常场景
3. 按优先级从高到低排序
4. 仅返回合法JSON，不要额外解释"""
    
    try:
        logger.info("Calling OpenAI API to generate test cases")
        
        start = datetime.now()  # record the start time (datetime object)
        response = Client.chat.completions.create(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": prompt}
            ],
            temperature = temp,
            response_format={"type": "json_object"},
            model='deepseek-ai/DeepSeek-V3',
            max_tokens=4096,
            stream=False,
        )
        logger.info(f"系统prompt:\n{system_prompt}\n\n用户prompt:\n{prompt}")
        end = datetime.now()
        time_diff = (end - start).total_seconds() 
        logger.info(f"OpenAI API call completed in {time_diff:.2f} seconds")
        content = response.choices[0].message.content
        logger.debug(f"Raw API response: {content}")
        # Repair common JSON defects in the model output, then parse.
        try:
            parsed_content = json.loads(repair_json(content))
            logger.info("Successfully parsed test cases from API response")
            return parsed_content
        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse JSON from API response: {e}")
            logger.debug(f"Problematic content: {content}")
            st.error("AI返回的数据格式不正确，请尝试重新生成")
            return []
    except Exception as e:
        logger.error(f"AI generation failed: {str(e)}")
        st.error(f"AI罢工了：{str(e)}（检查API_KEY是否正确？）")
        return []

def apply_custom_styles():
    """Inject the app's custom CSS and a fixed full-screen striped
    background layer via ``st.markdown`` with raw HTML enabled."""
    st.markdown("""
    <style>
        .main .block-container {
            padding-top: 2rem;
            position: relative;
        }
        
        h1, h2, h3 {
            color: #1E3A8A;
        }
        
        .css-card {
            border-radius: 10px;
            padding: 20px;
            margin-bottom: 15px;
            background-color: white;
            box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
        }
        
        .upload-section {
            border: 2px dashed #3B82F6;
            border-radius: 10px;
            padding: 20px;
            text-align: center;
            margin-bottom: 20px;
            background-color: #F3F4F6;
        }
        
        .info-box {
            background-color: #E0F2FE;
            padding: 10px 15px;
            border-radius: 6px;
            margin-bottom: 15px;
        }
        
        .success-box {
            background-color: #D1FAE5;
            padding: 10px 15px;
            border-radius: 6px;
            margin-bottom: 15px;
        }
    </style>
    """, unsafe_allow_html=True)
    
    # Decorative background: fixed, click-through (pointer-events: none),
    # rendered behind all content (negative z-index).
    st.markdown("""

    
    <div style="
        position: fixed;
        top: 0;
        left: 0;
        width: 100vw;
        height: 100vh;
        z-index: -2;
        pointer-events: none;
        background-image: repeating-linear-gradient(
            45deg,
            rgba(180, 180, 180, 0.05),
            rgba(180, 180, 180, 0.05) 150px,
            rgba(180, 180, 180, 0.1) 150px,
            rgba(180, 180, 180, 0.1) 300px
        );
    "></div>
    """, unsafe_allow_html=True)
def display_results(cases):
    """Render generated test cases in table / JSON / statistics tabs.

    Does nothing when *cases* is empty.
    """
    if not cases:
        return

    frame = pd.DataFrame(cases)
    # Categorical dtype gives a discrete axis in the priority chart below.
    frame["优先级"] = frame["优先级"].astype("category")

    table_tab, json_tab, stats_tab = st.tabs(["表格视图", "JSON数据", "统计信息"])

    with table_tab:
        st.dataframe(frame, use_container_width=True)

    with json_tab:
        st.code(json.dumps(cases, indent=2, ensure_ascii=False))

    with stats_tab:
        st.subheader("用例分布")
        st.bar_chart(frame["优先级"].value_counts().sort_index())

def main():
    """Streamlit entry point.

    Renders the system selector, then two tabs: test-case generation
    (with optional knowledge-base enhancement) and knowledge-base
    management (PDF upload + browsing).  Counters for the selected system
    are cached in ``st.session_state`` and reset on system change.
    """
    logger.info("Starting application")
    st.set_page_config(page_title="🤖 AI测试小秘书", layout="wide")
    apply_custom_styles()
    
    st.title("💡 AI测试用例生成工具（内置知识库）")
    st.markdown("<p style='color:#4B5563;'>上传领域文档，设计出更专业的测试用例</p>", unsafe_allow_html=True)
    system_name=st.selectbox(
        "请选择待测系统名称：",
        options=["","集中化计划建设管理系统","集中化报账服务平台",
                    "集中化人力资源管理系统",
                    "云化统一信息平台（门户业务）",
                    "云化统一信息平台（综合业务）",
                    "云化统一信息平台（公文业务）",
                    "全国统一邮件系统",
                    "集中化研发管理系统",
                    "PMS-IT公司计划建设协同管理",
                    "集中化预算管理系统",
                    "集中化司库管理系统",
                    "集中化合同管理系统",
                    "集中化供应链管理系统",
                    "集中化成本管理系统",
                    "集中化资产管理系统"],
        index=0    
    )
    # Reset the cached counters whenever the selected system changes.
    if 'prev_system' not in st.session_state:
        st.session_state.prev_system = None
    if system_name != st.session_state.prev_system:
        logger.info(f"System changed to: {system_name}")
        st.session_state.prev_system = system_name
        st.session_state.knowledge_segments_count = 0  # reset to 0 first
        st.session_state.test_case_count = 0
        if system_name:  # load data only when a system is actually selected
            system_code = SYSTEM_MAPPING[system_name]
            knowledge_df = load_knowledge_segments(system_code)
            st.session_state.knowledge_segments_count = len(knowledge_df)
            test_cases_df = load_cases(system_code)
            st.session_state.test_case_count = len(test_cases_df)
        st.experimental_rerun()
    st.session_state['system_name']=system_name
    if 'knowledge_segments_count' not in st.session_state:
        st.session_state.knowledge_segments_count = 0
        
    
    # Require a system selection before rendering the rest of the UI.
    if not system_name:
        st.warning("请先选择待测试的系统")

    else:
         # Resolve the short system code for the selected system.
        system_code = SYSTEM_MAPPING[system_name]
        
        # Keep it in session_state for later use.
        st.session_state['system_code'] = system_code
        
        st.success(f"已选择系统：{system_name}")
        knowledge_df=load_knowledge_segments(system_code)
        if not knowledge_df.empty:
            st.session_state.knowledge_segments_count = len(knowledge_df)
        
        test_cases_df = load_cases(system_code)
        
        tab1, tab2 = st.tabs(["📝 生成测试用例", "📚 知识库管理"])
        
        with tab1:
            col1, col2 = st.columns([2, 1])
            
            with col1:
                st.markdown("<h3>输入需求描述</h3>", unsafe_allow_html=True)
                
                with st.form("magic_form"):
                    user_input = st.text_area("描述你的测试需求", height=150,
                                            placeholder="示例：购物车需支持添加商品、库存不足提示、批量删除")
                    
                    col_button1, col_button2 = st.columns([1, 3])
                    with col_button1:
                        submitted = st.form_submit_button("✨ 生成测试用例", use_container_width=True)
                    with col_button2:
                        use_knowledge = st.checkbox("使用知识库增强", value=True, help="勾选后将使用知识库和历史用例增强测试用例生成")
            
            with col2:
                st.markdown("<h3>知识库状态</h3>", unsafe_allow_html=True)
                # Show live knowledge-base counters for the current system.
                current_count = st.session_state.knowledge_segments_count
                current_case_count=st.session_state.test_case_count
                st.markdown(f"""
                <div class="info-box">
                    <p><b>📊 知识库统计</b></p>
                    <p>• 文档段落：{current_count} 条</p>
                    <p>• 历史用例：{current_case_count} 条</p>
                </div>
                """, unsafe_allow_html=True)
                
                with st.expander("如何获得更好的结果？"):
                    st.markdown("""
                    1. **上传领域文档**：在"知识库管理"选项卡上传相关PDF
                    2. **描述需求细节**：越详细的需求描述越能获得精准的测试用例
                    3. **迭代优化**：基于生成结果，调整需求描述再次生成
                    """)
        
            if submitted and user_input:
                logger.info(f"Generating test cases for requirement: {user_input[:100]}...")
                start = datetime.now()  # record the start time (datetime object)
                if use_knowledge:
                    # Enhanced path: retrieve similar cases + knowledge first.
                    with st.spinner("🔍 正在搜索相关知识..."):
                        similar_cases = find_similar_cases(user_input, test_cases_df)
                        relevant_knowledge = find_relevant_knowledge(user_input, knowledge_df)
                    
                    with st.spinner("🤖 AI正在生成增强测试用例..."):
                        new_cases = generate_test_cases(user_input, similar_cases, relevant_knowledge, use_enhancement=True)
                    
                    st.success("✅ 测试用例生成完成！(使用知识库增强)")
                    end = datetime.now()
                    time_diff = (end - start).total_seconds() 
                    logger.info(f"Test case generation completed in {time_diff:.2f} seconds")
                    # Persist the generated cases so they enrich future retrieval.
                    try:
                        saved_count = save_test_cases(system_code, user_input, new_cases)
                        st.session_state.test_case_count = saved_count
                        logger.info(f"Saved {len(new_cases)} test cases, total now: {saved_count}")
                        st.info(f"已保存测试用例到知识库，当前总数: {saved_count}")
                    except Exception as e:
                        logger.error(f"Failed to save test cases: {str(e)}")
                        st.error(f"保存测试用例失败: {str(e)}")

                    if relevant_knowledge:
                        with st.expander("📑 参考的领域知识", expanded=True):
                            for i, segment in enumerate(relevant_knowledge):
                                st.markdown(f"""
                                <div style="margin-bottom: 10px; padding: 10px; border-left: 3px solid #3B82F6; background-color: #F3F4F6;">
                                    <p><b>文档：</b>{segment['document']} (第{segment['page']}页)</p>
                                    <p>{segment['content']}</p>
                                </div>
                                """, unsafe_allow_html=True)
                    
                    if similar_cases:
                        with st.expander("👉 参考的历史用例", expanded=True):
                            st.json(similar_cases)
                else:
                    # Plain path: generation from the raw requirement only.
                    with st.spinner("🤖 AI正在生成基础测试用例..."):
                        new_cases = generate_test_cases(user_input, use_enhancement=False)
                    
                    st.success("✅ 测试用例生成完成！(仅使用原始需求)")
                
                st.subheader("🎯 生成的测试用例")
                display_results(new_cases)
                
        with tab2:
            st.markdown("<h3>📤 上传知识文档</h3>", unsafe_allow_html=True)
            
            upload_col1, upload_col2 = st.columns([3, 1])
            
            with upload_col1:
                uploaded_file = st.file_uploader("上传PDF文档（将被分割并存入知识库）", type="pdf")
                
            with upload_col2:
                if uploaded_file is not None:
                    if st.button("处理文档", type="primary", use_container_width=True):
                        with st.spinner("正在处理PDF文档..."):
                            logger.info(f"Processing uploaded PDF: {uploaded_file.name}")
                            segments, segment_count, page_count = process_pdf(uploaded_file)
                            if segments:
                                try:
                                    total_count = save_knowledge_segments(segments,system_code)
                                    st.session_state.knowledge_segments_count = total_count
                                    logger.info(f"Successfully processed PDF: {segment_count} segments from {page_count} pages")
                                    st.success(f"✅ 文档处理完成！从 {page_count} 页中提取了 {segment_count} 个知识段落。")
                                    knowledge_df = load_knowledge_segments(system_code)
                                    st.experimental_rerun()
                                except Exception as e:
                                    logger.error(f"Failed to save knowledge segments: {str(e)}")
                                    st.error(f"保存知识段落失败: {str(e)}")
            
            if not knowledge_df.empty:
                st.markdown("<h3>📚 知识库内容</h3>", unsafe_allow_html=True)
                
                docs = knowledge_df['document_name'].unique()
                st.markdown(f"当前知识库包含 **{len(docs)}** 个文档，共 **{len(knowledge_df)}** 个知识段落。")
                
                selected_doc = st.selectbox("选择要查看的文档", ["所有文档"] + list(docs))
                
                if selected_doc == "所有文档":
                    display_df = knowledge_df
                else:
                    display_df = knowledge_df[knowledge_df['document_name'] == selected_doc]
                
                # Cap the rendered rows at 20 to keep the page responsive.
                for _, row in display_df.head(20).iterrows():
                    st.markdown(f"""
                    <div style="margin-bottom: 10px; padding: 15px; border-radius: 8px; background-color: #F8FAFC; border: 1px solid #E2E8F0;">
                        <p style="margin:0; color: #6B7280; font-size: 0.8rem;">文档：{row['document_name']} | 第{row['page_num']}页</p>
                        <p style="margin-top: 8px;">{row['content']}</p>
                    </div>
                    """, unsafe_allow_html=True)
                
                if len(display_df) > 20:
                    st.info(f"仅显示前20条记录，共 {len(display_df)} 条")

# Script entry point: run the Streamlit app.
if __name__ == "__main__":
    main()