#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""嵌入服务路由模块

此模块提供了文档、图片、文本和网页URL的嵌入功能，包括单文件处理和批量处理能力。
所有路由函数都采用异步处理方式，并通过线程池执行同步操作以避免阻塞事件循环。
"""
import os
import logging
import tempfile
import json
import datetime
from typing import List
from fastapi import FastAPI, UploadFile, File, Form, Depends, HTTPException
from fastapi.responses import JSONResponse
from data_models.models import TextEmbeddingRequest, ParseWebURLRequest, ParseWebURLBatchRequest
from utils.async_utils import run_in_thread_pool

# 导入统一的日志配置模块
from log_config import setup_logging, shutdown_logging
# 导入配置服务
from services.config_service import ConfigService
# Configure logging via the project's shared logging module.
setup_logging()
logger = logging.getLogger(__name__)

# Read the upload directory from the configuration file.
# NOTE: importing this module creates the directories as a side effect.
config_service = ConfigService.get_instance()
UPLOADS_DIR = config_service.get('upload', 'upload_dir', fallback='uploads')
os.makedirs(UPLOADS_DIR, exist_ok=True)
logger.info(f"上传目录已设置为: {UPLOADS_DIR}")

# Directory holding the JSON upload-history records (see save_upload_history).
HISTORY_DIR = config_service.get('history', 'history_dir', fallback='history')
os.makedirs(HISTORY_DIR, exist_ok=True)
logger.info(f"历史目录已设置为: {HISTORY_DIR}")

# Helper function: persist an uploaded document into the uploads directory.
def save_doc_file_to_directory(file_content, filename, category):
    """Save document bytes under UPLOADS_DIR/<category>/docs/<YYYYMMDD>/.

    The stored name is "<original>_<timestamp><ext>".  Before writing, the
    target directory is scanned: if a file with the same base name (timestamp
    suffix stripped), same extension and same byte size already exists, that
    file is reused instead of writing a duplicate.

    Args:
        file_content: Raw bytes of the uploaded document.
        filename: Original client file name (supplies base name + extension).
        category: Optional category; None falls back to 'uncategorized'.

    Returns:
        tuple: (saved file path, stored file name).

    Raises:
        Exception: Any I/O error is logged and re-raised.
    """
    # Hoisted out of the per-file loop: the original re-imported `re` and
    # rebuilt the pattern on every directory entry.  The pattern matches the
    # "_YYYYMMDD_HHMMSS_ffffff" suffix appended below.
    import re
    timestamp_pattern = re.compile(r'_\d{8}_\d{6}_\d{6}$')
    try:
        # Timestamp suffix makes stored names unique per upload.
        timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S_%f')
        sub_dir = 'docs'

        # Layout: UPLOADS_DIR/<category>/docs/<YYYYMMDD>/
        category_dir = os.path.join(UPLOADS_DIR, category) if category else os.path.join(UPLOADS_DIR, 'uncategorized')
        current_date = datetime.datetime.now().strftime('%Y%m%d')
        target_dir = os.path.join(category_dir, sub_dir, current_date)
        os.makedirs(target_dir, exist_ok=True)

        original_name, extension = os.path.splitext(filename)
        new_filename = f"{original_name}_{timestamp}{extension}"
        file_path = os.path.join(target_dir, new_filename)

        # Dedupe key: base name without timestamp + extension + byte size.
        current_base_name = f"{original_name}{extension}"
        current_file_size = len(file_content)

        for existing_file in os.listdir(target_dir):
            existing_name, existing_ext = os.path.splitext(existing_file)
            if existing_ext != extension:
                continue
            # sub() with no match returns the name unchanged, which covers the
            # original's "no timestamp suffix" branch as well.
            base_name_candidate = timestamp_pattern.sub('', existing_name) + existing_ext
            if base_name_candidate == current_base_name:
                existing_file_path = os.path.join(target_dir, existing_file)
                if os.path.getsize(existing_file_path) == current_file_size:
                    logger.info(f"找到相同文件，直接复用: {existing_file}")
                    return existing_file_path, existing_file

        # No duplicate found: write the new file.
        with open(file_path, "wb") as buffer:
            buffer.write(file_content)

        return file_path, new_filename
    except Exception as e:
        logger.error(f"保存文件时出错: {str(e)}")
        raise

# Helper function: persist raw text content to a file.
def save_text_to_file(text, category):
    """Save text (UTF-8) under UPLOADS_DIR/<category>/txts/<YYYYMMDD>/.

    The file is named "text_<timestamp>.txt".  Before writing, same-sized
    .txt files in the target directory are compared by content; an identical
    file is reused instead of writing a duplicate.

    Args:
        text: Text content to store.
        category: Optional category; None falls back to 'uncategorized'.

    Returns:
        tuple: (saved file path, stored file name).

    Raises:
        Exception: Any I/O error is logged and re-raised.
    """
    # Compiled once; matches the "_YYYYMMDD_HHMMSS_ffffff" suffix used below.
    import re
    timestamp_pattern = re.compile(r'_\d{8}_\d{6}_\d{6}$')
    try:
        timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S_%f')
        sub_dir = 'txts'

        # Layout: UPLOADS_DIR/<category>/txts/<YYYYMMDD>/
        category_dir = os.path.join(UPLOADS_DIR, category) if category else os.path.join(UPLOADS_DIR, 'uncategorized')
        current_date = datetime.datetime.now().strftime('%Y%m%d')
        target_dir = os.path.join(category_dir, sub_dir, current_date)
        os.makedirs(target_dir, exist_ok=True)

        new_filename = f"text_{timestamp}.txt"
        file_path = os.path.join(target_dir, new_filename)

        # Cheap size check first, full content comparison only on a size match.
        current_file_size = len(text.encode('utf-8'))
        current_base_name = "text.txt"

        # makedirs above guarantees target_dir exists, so listdir is safe.
        for existing_file in os.listdir(target_dir):
            if not existing_file.endswith('.txt'):
                continue
            existing_file_path = os.path.join(target_dir, existing_file)
            existing_name, existing_ext = os.path.splitext(existing_file)
            # Base name with any timestamp suffix stripped (sub() is a no-op
            # when the suffix is absent).
            base_name_candidate = timestamp_pattern.sub('', existing_name) + existing_ext
            if base_name_candidate == current_base_name or existing_file.startswith('text_'):
                if os.path.getsize(existing_file_path) == current_file_size:
                    try:
                        with open(existing_file_path, "r", encoding="utf-8") as f:
                            existing_content = f.read()
                    # BUGFIX: was a bare `except:` which also swallows
                    # KeyboardInterrupt/SystemExit; only read/decode errors
                    # should cause this candidate to be skipped.
                    except (OSError, UnicodeDecodeError):
                        continue
                    if existing_content == text:
                        logger.info(f"找到相同内容的文本文件，直接复用: {existing_file}")
                        return existing_file_path, existing_file

        # No identical file found: write the new one.
        with open(file_path, "w", encoding="utf-8") as f:
            f.write(text)

        return file_path, new_filename
    except Exception as e:
        logger.error(f"保存文本时出错: {str(e)}")
        raise

# NOTE: the URL-saving helper (save_url_to_file) is defined further below,
# after save_images_to_directory.

# Helper function: save uploaded image files to the target directory
def save_images_to_directory(files, category, description):
    """Save a batch of images under UPLOADS_DIR/<category>/images/<timestamp>/.

    Original file names are kept unchanged, so callers can rebuild each image
    path as os.path.join(<returned dir>, file.filename).  An optional batch
    description is stored alongside the images as desc.txt.

    Args:
        files: Iterable of upload objects exposing .filename and .file.
        category: Optional category; None falls back to 'uncategorized'.
        description: Optional description text for the whole batch.

    Returns:
        tuple: (target directory path, timestamp identifier for the batch).

    Raises:
        Exception: Any I/O error is logged and re-raised.
    """
    try:
        # One directory per upload batch, keyed by timestamp.
        timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S_%f')

        # Layout: UPLOADS_DIR/<category>/images/<timestamp>/
        category_dir = os.path.join(UPLOADS_DIR, category) if category else os.path.join(UPLOADS_DIR, 'uncategorized')
        target_dir = os.path.join(category_dir, 'images', timestamp)
        os.makedirs(target_dir, exist_ok=True)

        # Optional free-text description for the whole batch.
        if description:
            desc_file_path = os.path.join(target_dir, 'desc.txt')
            with open(desc_file_path, "w", encoding="utf-8") as f:
                f.write(description)
            logger.info(f"描述文件已保存到: {desc_file_path}")

        # Save every image under its original name.
        # NOTE(review): file.filename comes straight from the client; a name
        # containing path separators ("../x") could escape target_dir.
        # Consider os.path.basename() here AND at the caller that rebuilds
        # the path (embedding_images) — both must change together.
        for file in files:
            file_path = os.path.join(target_dir, file.filename)
            file_content = file.file.read()
            with open(file_path, "wb") as buffer:
                buffer.write(file_content)
            logger.info(f"图片文件已保存到: {file_path}")

        # The timestamp doubles as the batch identifier for the caller.
        return target_dir, timestamp
    except Exception as e:
        logger.error(f"保存图片文件时出错: {str(e)}")
        raise

# Helper function: append a URL to the category's dated URL file.
def save_url_to_file(category, url):
    """Record a URL in UPLOADS_DIR/<category>/urls/<YYYYMMDD>.txt.

    One file per day per category; one URL per line.  If the URL is already
    present in today's file it is not written again.

    Args:
        category: Optional category; None falls back to 'uncategorized'.
        url: The URL to record.

    Returns:
        tuple: (file path, file name) of the dated URL file.

    Raises:
        Exception: Any I/O error (other than a failed duplicate check) is
        logged and re-raised.
    """
    try:
        # Layout: UPLOADS_DIR/<category>/urls/<YYYYMMDD>.txt
        category_dir = os.path.join(UPLOADS_DIR, category) if category else os.path.join(UPLOADS_DIR, 'uncategorized')
        current_date = datetime.datetime.now().strftime('%Y%m%d')
        target_dir = os.path.join(category_dir, 'urls')
        os.makedirs(target_dir, exist_ok=True)

        filename = f"{current_date}.txt"
        file_path = os.path.join(target_dir, filename)

        file_exists = os.path.exists(file_path)

        # Duplicate check: if the URL is already recorded today, reuse the file.
        if file_exists:
            try:
                with open(file_path, "r", encoding="utf-8") as f:
                    for existing_url in f:
                        if existing_url.strip() == url.strip():
                            logger.info(f"URL已存在于文件中: {file_path}")
                            return file_path, filename
            except Exception as e:
                logger.warning(f"读取URL文件时出错: {str(e)}")
                # Best effort: fall through and append anyway.

        # BUGFIX: the previous condition (`not file_exists or f.tell() > 0`)
        # wrote a leading newline into brand-new files.  Only separate when
        # appending to a non-empty existing file ('a' positions at EOF, so
        # tell() is the current size).
        mode = "a" if file_exists else "w"
        with open(file_path, mode, encoding="utf-8") as f:
            if file_exists and f.tell() > 0:
                f.write("\n")
            f.write(url)

        action = "追加" if file_exists else "创建"
        # BUGFIX: log the actual path (was the literal placeholder "(unknown)").
        logger.info(f"成功{action}URL到文件: {file_path}")
        return file_path, filename
    except Exception as e:
        logger.error(f"保存URL时出错: {str(e)}")
        raise

# Helper function: persist one upload-history record.
def save_upload_history(upload_type, category, files_info, additional_info=None):
    """Append one upload record to the current monthly JSON history file.

    Args:
        upload_type: Upload kind (doc/image/text/weburl/weburl_batch).
        category: Upload category (None is recorded as "uncategorized").
        files_info: List of per-file info dicts.
        additional_info: Optional dict merged into the record.

    Returns:
        The history file path on success, or None when saving failed —
        history failures must never break the main request flow.

    Notes:
        - Records are grouped into HISTORY_DIR/<YYYYMM>/ directories.
        - Each JSON file holds at most 100 records; a new file is started
          once the current one is full (see _get_current_history_file).
    """
    try:
        current_time = datetime.datetime.now()
        time_str = current_time.isoformat()

        # Monthly sub-directory, e.g. history/202406/.
        history_year_month = current_time.strftime('%Y%m')
        history_dir = os.path.join(HISTORY_DIR, history_year_month)
        os.makedirs(history_dir, exist_ok=True)

        # File with spare capacity (or a fresh path if all are full).
        history_file_path, _records_count = _get_current_history_file(history_dir)

        history_entry = {
            "id": f"{upload_type}_{current_time.strftime('%Y%m%d_%H%M%S_%f')}",
            "upload_type": upload_type,
            "category": category or "uncategorized",
            "upload_time": time_str,
            "total_items": len(files_info),
            "files_info": files_info
        }
        if additional_info:
            history_entry.update(additional_info)

        # Load existing records (the chosen file may not exist yet), append,
        # and rewrite the whole file.
        if os.path.exists(history_file_path):
            with open(history_file_path, "r", encoding="utf-8") as f:
                all_records = json.load(f)
        else:
            all_records = []
        all_records.append(history_entry)

        with open(history_file_path, "w", encoding="utf-8") as f:
            json.dump(all_records, f, ensure_ascii=False, indent=2)

        logger.info(f"上传历史记录已保存到: {history_file_path}, 当前记录数: {len(all_records)}")
        return history_file_path
    except Exception as e:
        logger.error(f"保存上传历史记录时出错: {str(e)}")
        # Explicit: a history failure is swallowed and signalled with None.
        return None


def _get_current_history_file(history_dir):
    """获取当前应使用的历史记录文件路径和记录数
    
    规则：
    - 按年月组织目录
    - 文件名格式: upload_history_YYYYMMDD_HHMMSS.json
    - 每个文件最多保存100条记录
    """
    # 查找目录中已有的历史文件，按创建时间排序
    history_files = []
    for filename in os.listdir(history_dir):
        if filename.startswith("upload_history_") and filename.endswith(".json"):
            file_path = os.path.join(history_dir, filename)
            try:
                with open(file_path, "r", encoding="utf-8") as f:
                    records = json.load(f)
                    records_count = len(records)
                    # 如果文件存在但记录数少于100，返回该文件
                    if records_count < 100:
                        return file_path, records_count
                    history_files.append((file_path, records_count))
            except (json.JSONDecodeError, IOError):
                # 文件损坏或无法读取，跳过
                continue
    
    # 如果没有找到合适的文件或所有文件都已满100条记录，创建新文件
    timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    new_filename = f"upload_history_{timestamp}.json"
    new_file_path = os.path.join(history_dir, new_filename)
    return new_file_path, 0
        # 历史记录失败不应影响主流程，返回None

def register_embed_routes(app: FastAPI, get_embedding_service, get_thread_pool, get_config_service=None):
    """注册所有嵌入相关的API路由
    
    Args:
        app: FastAPI应用实例
        get_embedding_service: 嵌入服务获取函数（通过依赖注入）
        get_thread_pool: 线程池获取函数（通过依赖注入）
        get_config_service: 获取配置服务的依赖注入函数（可选）
    """
    @app.post("/embed/doc")
    async def embedding_files(files: List[UploadFile] = File(...), category: str = Form(None),
                            embedding_service=Depends(get_embedding_service),
                            thread_pool=Depends(get_thread_pool)):
        """Upload one or more documents and embed each of them.

        Args:
            files: Uploaded document files.
            category: Optional category name used to organize saved files.
            embedding_service: Embedding service instance (dependency injected).
            thread_pool: Thread pool instance (dependency injected).

        Returns:
            JSONResponse: Overall success flag, per-file results and total
            file count.  HTTP 200 when every file succeeded, 207
            (Multi-Status) when some failed.

        Raises:
            HTTPException: 500 on unexpected processing errors.
        """
        try:
            results = []
            logger.info(f"category: {category} , 开始处理 {len(files)} 个文件: {[file.filename for file in files]}")
            for file in files:
                logger.info(f"开始处理上传文件: {file.filename}")
                # Read the whole upload into memory.
                # NOTE(review): large files are fully buffered here — confirm
                # acceptable for expected document sizes.
                file_content = await file.read()
                logger.info(f"文件信息: {file.filename} 大小: {len(file_content)} 字节")

                # Persist to the permanent uploads directory (reuses an
                # identical existing file when name+size match).
                saved_file_path, saved_filename = save_doc_file_to_directory(
                    file_content=file_content,
                    filename=file.filename,
                    category=category,
                )
                logger.info(f"文件路径: {saved_filename} 已保存到 {saved_file_path}")

                # Metadata forwarded to the embedding service.
                metadata = {}
                if category:
                    metadata["category"] = category

                # Run the synchronous embedding in the thread pool so the
                # FastAPI event loop is not blocked.
                result = await run_in_thread_pool(
                    thread_pool,
                    embedding_service.embedding_file,
                    saved_file_path, metadata
                )

                # NOTE(review): assumes embedding_file returns a dict with at
                # least 'success' and, on failure, 'message' — confirm.
                if result['success']:
                    logger.info(f"文档 {saved_filename} 嵌入成功")
                    results.append({
                        "filename": saved_file_path,
                        "original_filename": file.filename,
                        "success": True,
                        "message": "文档上传并嵌入成功",
                        "data": result
                    })
                else:
                    logger.error(f"文档 {saved_filename} 嵌入失败: {result['message']}")
                    results.append({
                        "filename": saved_file_path,
                        "original_filename": file.filename,
                        "success": False,
                        "message": result['message']
                    })

            # Flatten per-file outcomes for the history record.
            files_info = []
            for r in results:
                files_info.append({
                    "original_filename": r["original_filename"],
                    "saved_path": r["filename"],
                    "success": r["success"],
                    "message": r["message"]
                })

            # Best-effort upload-history record.
            save_upload_history(
                upload_type="doc",
                category=category,
                files_info=files_info,
                additional_info={"total_files": len(files)}
            )

            # 207 (Multi-Status) signals partial failure to the client.
            all_success = all(r['success'] for r in results)
            return JSONResponse(content={
                "success": all_success,
                "message": "所有文件上传完成" if all_success else "部分文件上传失败",
                "results": results,
                "total_files": len(files)
            }, status_code=200 if all_success else 207)
        except Exception as e:
            logger.error(f"处理文件时出错: {str(e)}")
            raise HTTPException(status_code=500, detail=f"处理文件时出错: {str(e)}")

   
    @app.post("/embed/images")
    async def embedding_images(files: List[UploadFile] = File(...), metadata: str = Form(...),
                             embedding_service=Depends(get_embedding_service),
                             thread_pool=Depends(get_thread_pool)):
        """Batch image embedding endpoint.

        Args:
            files: Uploaded image files.
            metadata: Image metadata as a JSON string; the keys "category"
                and "description" are used for storage organization.
            embedding_service: Embedding service instance (dependency injected).
            thread_pool: Thread pool instance (dependency injected).

        Returns:
            JSONResponse: Overall success flag, per-image results and total
            file count.  HTTP 200 when every image succeeded, 207 when some
            failed.

        Raises:
            HTTPException: 500 on unexpected processing errors.
        """
        try:
            results = []
            logger.info(f"开始处理 {len(files)} 个图片文件，元数据: {metadata}")

            # Parse the metadata JSON string; malformed JSON degrades to {}.
            metadata_dict = {}
            if metadata:
                try:
                    metadata_dict = json.loads(metadata)
                except json.JSONDecodeError as e:
                    logger.warning(f"元数据JSON解析失败: {str(e)}，使用空元数据")

            category = metadata_dict.get("category") if metadata_dict else None
            description = metadata_dict.get("description") if metadata_dict else None

            # Persist all images (original names kept) into one batch directory.
            saved_file_path, saved_filename = save_images_to_directory(
                files,
                category=category,
                description=description
            )
            logger.info(f"图片已保存到永久目录: {saved_file_path}")

            for file in files:
                logger.info(f"开始处理上传图片: {file.filename}")
                # Bytes were already written by save_images_to_directory;
                # rebuild the on-disk path from the batch dir + original name.
                image_file_path = os.path.join(saved_file_path, file.filename)

                # Run the synchronous embedding in the thread pool so the
                # FastAPI event loop is not blocked.
                result = await run_in_thread_pool(
                    thread_pool,
                    embedding_service.embedding_image,
                    image_file_path, metadata_dict
                )

                # NOTE(review): both branches read result['image_path'] (and
                # the success branch result['image_url']); assumes
                # embedding_image always includes 'image_path', even on
                # failure — confirm, otherwise the failure branch raises
                # KeyError.
                if result['success']:
                    logger.info(f"图片 {saved_filename} 嵌入成功")
                    results.append({
                        "filename": result['image_path'],
                        "saved_filename": result['image_path'],
                        "original_filename": file.filename,
                        "image_url": result['image_url'],
                        "success": True,
                        "message": "图片上传并嵌入成功",
                        "data": result
                    })
                else:
                    logger.error(f"图片 {saved_filename} 嵌入失败: {result['message']}")
                    results.append({
                        "filename": result['image_path'],
                        "saved_filename": result['image_path'],
                        "original_filename": file.filename,
                        "success": False,
                        "message": result['message']
                    })

            # Flatten per-image outcomes for the history record.
            files_info = []
            for r in results:
                files_info.append({
                    "original_filename": r["original_filename"],
                    "saved_path": r["filename"],
                    "success": r["success"],
                    "message": r["message"]
                })

            # Best-effort upload-history record.
            save_upload_history(
                upload_type="image",
                category=category,
                files_info=files_info,
                additional_info={
                    "total_files": len(files),
                    "description": description
                }
            )

            # 207 (Multi-Status) signals partial failure to the client.
            all_success = all(r['success'] for r in results)
            return JSONResponse(content={
                "success": all_success,
                "message": "所有图片上传完成" if all_success else "部分图片上传失败",
                "results": results,
                "total_files": len(files)
            }, status_code=200 if all_success else 207)
        except Exception as e:
            logger.error(f"处理图片时出错: {str(e)}")
            raise HTTPException(status_code=500, detail=f"处理图片时出错: {str(e)}")

    @app.post("/embed/text")
    async def embedding_text(request: TextEmbeddingRequest,
                            embedding_service=Depends(get_embedding_service),
                            thread_pool=Depends(get_thread_pool)):
        """Text embedding endpoint.

        Args:
            request: Request carrying `text` and optional `metadata` (the
                "category" key controls where the text file is saved).
            embedding_service: Embedding service instance (dependency injected).
            thread_pool: Thread pool instance (dependency injected).

        Returns:
            JSONResponse: 200 with the embedding on success, 400 on
            embedding failure.

        Raises:
            HTTPException: 500 on unexpected processing errors.
        """
        try:
            logger.info(f"开始处理文本嵌入请求，文本长度: {len(request.text)}")

            # Persist the raw text (deduplicated by content) before embedding.
            category = request.metadata.get("category") if request.metadata else None
            saved_file_path, saved_filename = save_text_to_file(request.text, category)
            logger.info(f"文本已保存到文件: {saved_file_path}")

            # Run the synchronous embedding in the thread pool so the
            # FastAPI event loop is not blocked.
            result = await run_in_thread_pool(
                thread_pool,
                embedding_service.embedding_text,
                request.text, request.metadata
            )

            # Best-effort upload-history record (written even on failure).
            files_info = [{
                "saved_path": saved_file_path,
                "success": result['success'],
                "message": result['message'] if not result['success'] else "文本嵌入成功"
            }]
            save_upload_history(
                upload_type="text",
                category=category,
                files_info=files_info,
                additional_info={
                    "text_length": len(request.text),
                    "text_preview": request.text[:100] + ("..." if len(request.text) > 100 else "")
                }
            )

            if result['success']:
                logger.info("文本嵌入成功")
                return JSONResponse(content={
                    "success": True,
                    "message": "文本嵌入成功",
                    "embedding": result['embedding'],
                    "filename": saved_file_path,
                    "metadata": request.metadata
                }, status_code=200)
            else:
                logger.error(f"文本嵌入失败: {result['message']}")
                return JSONResponse(content={
                    "success": False,
                    "message": result['message'],
                    "filename": saved_file_path
                }, status_code=400)
        except Exception as e:
            logger.error(f"文本嵌入时出错: {str(e)}")
            raise HTTPException(status_code=500, detail=f"文本嵌入时出错: {str(e)}")

    @app.post("/embed/weburl")
    async def embedding_weburl(request: ParseWebURLRequest,
                              embedding_service=Depends(get_embedding_service),
                              thread_pool=Depends(get_thread_pool)):
        """Web URL embedding endpoint.

        Args:
            request: Request carrying `url` and optional `metadata` (the
                "category" key controls where the URL record is saved).
            embedding_service: Embedding service instance (dependency injected).
            thread_pool: Thread pool instance (dependency injected).

        Returns:
            JSONResponse: 200 with the embedding on success, 400 on
            embedding failure.

        Raises:
            HTTPException: 500 on unexpected processing errors.
        """
        try:
            logger.info(f"开始处理 web url 解析请求:{request}")

            # BUGFIX: resolve `category` before branching — the failure branch
            # below also needs it, and previously it was only assigned on the
            # success path (NameError whenever the embedding failed).
            category = request.metadata.get("category") if request.metadata else None

            # Run the synchronous embedding in the thread pool so the
            # FastAPI event loop is not blocked.
            result = await run_in_thread_pool(
                thread_pool,
                embedding_service.embedding_weburl,
                request.url, request.metadata
            )

            if result['success']:
                # Record the URL in the per-category, per-day URL file.
                # NOTE(review): assumes the result dict exposes the fetched
                # 'url' — falls back to '' otherwise.
                saved_file_path, saved_filename = save_url_to_file(
                    category=category,
                    url=result.get('url', '')
                )
                logger.info(f"网页内容已保存到文件: {saved_file_path}")
                result['filename'] = saved_file_path

                # Best-effort upload-history record.
                files_info = [{
                    "original_filename": request.url,
                    "saved_path": saved_file_path,
                    "success": True,
                    "message": "web url嵌入成功"
                }]
                save_upload_history(
                    upload_type="weburl",
                    category=category,
                    files_info=files_info,
                    additional_info={
                        "total_urls": 1,
                        "url_list": [request.url]
                    }
                )

                logger.info("web url 嵌入成功")
                return JSONResponse(content={
                    "success": True,
                    "message": "web url嵌入成功",
                    "embedding": result['embedding'],
                    "filename": saved_file_path,
                    "metadata": request.metadata
                }, status_code=200)
            else:
                # Best-effort upload-history record for the failure.
                files_info = [{
                    "original_filename": request.url,
                    "saved_path": None,
                    "success": False,
                    "message": result['message']
                }]
                save_upload_history(
                    upload_type="weburl",
                    category=category,
                    files_info=files_info,
                    additional_info={
                        "total_urls": 1,
                        "url_list": [request.url]
                    }
                )

                logger.error(f"嵌入失败: {result['message']}")
                return JSONResponse(content={
                    "success": False,
                    "message": result['message']
                }, status_code=400)
        except Exception as e:
            logger.error(f"web url 嵌入时出错: {str(e)}")
            raise HTTPException(status_code=500, detail=f"web url嵌入时出错: {str(e)}")

    @app.post("/embed/weburl_batch")
    async def embedding_weburl_batch(request: ParseWebURLBatchRequest,
                                   embedding_service=Depends(get_embedding_service),
                                   thread_pool=Depends(get_thread_pool)):
        """Batch web-URL embedding endpoint.

        Args:
            request: Request carrying a `urls` list and optional `metadata`
                (the "category" key controls where URL records are saved).
            embedding_service: Embedding service instance (dependency injected).
            thread_pool: Thread pool instance (dependency injected).

        Returns:
            JSONResponse: 200 with per-URL results plus success/failed
            counts, or 400 when the batch as a whole failed.

        Raises:
            HTTPException: 500 on unexpected processing errors.
        """
        try:
            logger.info(f"开始处理批量 web url 解析请求，共{len(request.urls)}个URL")

            # BUGFIX: resolve `category` before branching — the failure branch
            # below also needs it, and previously it was only assigned on the
            # success path (NameError whenever the batch failed).
            category = request.metadata.get("category") if request.metadata else None

            # Run the synchronous batch embedding in the thread pool so the
            # FastAPI event loop is not blocked.
            results = await run_in_thread_pool(
                thread_pool,
                embedding_service.embedding_weburl_batch,
                request.urls, request.metadata
            )

            if results['success']:
                # Record every processed URL in the per-category URL file.
                for result in results['results']:
                    saved_file_path, saved_filename = save_url_to_file(
                        category=category,
                        url=result.get('url', '')
                    )
                    logger.info(f"网页内容已保存到文件: {saved_file_path}")
                    result['filename'] = saved_file_path

                # Flatten per-URL outcomes for the history record.
                files_info = []
                for result in results['results']:
                    files_info.append({
                        "original_filename": result.get("url", ""),
                        "saved_path": result.get("filename", None),
                        "success": result.get("success", False),
                        "message": result.get("message", "")
                    })

                # Best-effort upload-history record.
                save_upload_history(
                    upload_type="weburl_batch",
                    category=category,
                    files_info=files_info,
                    additional_info={
                        "total_urls": len(request.urls),
                        "url_list": request.urls,
                        "success_count": results['success_count'],
                        "failed_count": results['failed_count']
                    }
                )

                logger.info(f"批量web url 嵌入成功，成功{results['success_count']}个，失败{results['failed_count']}个")
                return JSONResponse(content={
                    "success": True,
                    "message": f"批量web url嵌入完成",
                    "results": results['results'],
                    "success_count": results['success_count'],
                    "failed_count": results['failed_count'],
                    "metadata": request.metadata
                }, status_code=200)
            else:
                # Best-effort upload-history record for the failed batch.
                files_info = [{
                    "original_filename": "batch_upload",
                    "saved_path": None,
                    "success": False,
                    "message": results['message']
                }]
                save_upload_history(
                    upload_type="weburl_batch",
                    category=category,
                    files_info=files_info,
                    additional_info={
                        "total_urls": len(request.urls),
                        "url_list": request.urls,
                        "error_message": results['message']
                    }
                )

                logger.error(f"批量嵌入失败: {results['message']}")
                return JSONResponse(content={
                    "success": False,
                    "message": results['message']
                }, status_code=400)
        except Exception as e:
            logger.error(f"批量web url 嵌入时出错: {str(e)}")
            raise HTTPException(status_code=500, detail=f"批量web url嵌入时出错: {str(e)}")


    def get_upload_history(page: int = 1, page_size: int = 20, upload_type: str = None, category: str = None, start_date: str = None, end_date: str = None):
        """Fetch paginated upload-history records from the HISTORY_DIR tree.

        History files are JSON files laid out as HISTORY_DIR/<year_month>/<file>.json,
        each containing either a list of record dicts or (legacy format) a single dict.

        Args:
            page: Page number (1-based); values < 1 are coerced to 1.
            page_size: Records per page; out-of-range values (< 1 or > 100) fall back to 20.
            upload_type: Optional upload-type filter (doc, text, image, weburl, weburl_batch).
            category: Optional category filter.
            start_date: Optional start-date filter, format YYYY-MM-DD.
            end_date: Optional end-date filter, format YYYY-MM-DD (the whole end day is included).

        Returns:
            dict: On success {"success": True, "data": [...], "total_count": int,
            "total_pages": int, "current_page": int, "page_size": int};
            on failure {"success": False, "message": str}.
        """
        try:
            logger.info(f"获取上传历史记录，页码: {page}, 每页数量: {page_size}, 类型: {upload_type}, 分类: {category}")

            # Sanitize pagination parameters instead of rejecting bad input.
            if page < 1:
                page = 1
            if page_size < 1 or page_size > 100:
                page_size = 20

            # Accumulator for every record that survives the filters.
            all_history = []

            # Parse the optional date filters.
            # BUGFIX: this module does `import datetime` (the module), so the class must be
            # referenced as `datetime.datetime` — the previous bare `datetime.strptime`
            # raised AttributeError, which silently disabled all date filtering.
            start_datetime = None
            end_datetime = None
            if start_date:
                try:
                    start_datetime = datetime.datetime.strptime(start_date, "%Y-%m-%d")
                except ValueError:
                    logger.warning(f"无效的开始日期格式: {start_date}")
            if end_date:
                try:
                    end_datetime = datetime.datetime.strptime(end_date, "%Y-%m-%d")
                    # Make the end date inclusive of its whole day.
                    end_datetime = end_datetime.replace(hour=23, minute=59, second=59)
                except ValueError:
                    logger.warning(f"无效的结束日期格式: {end_date}")

            # Walk year-month directories, newest first.
            if os.path.exists(HISTORY_DIR):
                year_month_dirs = sorted([d for d in os.listdir(HISTORY_DIR) if os.path.isdir(os.path.join(HISTORY_DIR, d))], reverse=True)

                for year_month in year_month_dirs:
                    year_month_path = os.path.join(HISTORY_DIR, year_month)
                    # Newest history files first (filenames embed timestamps).
                    history_files = sorted([f for f in os.listdir(year_month_path) if f.endswith('.json')], reverse=True)

                    for history_file in history_files:
                        file_path = os.path.join(year_month_path, history_file)
                        try:
                            with open(file_path, 'r', encoding='utf-8') as f:
                                history_entries = json.load(f)

                            # Backward compatibility: old files stored a single record dict.
                            if isinstance(history_entries, dict) and 'id' in history_entries:
                                history_entries = [history_entries]

                            for entry in history_entries:
                                # Filter by upload type.
                                if upload_type and entry.get('upload_type') != upload_type:
                                    continue

                                # Filter by category.
                                if category and entry.get('category') != category:
                                    continue

                                # Filter by date range.
                                entry_date_str = entry.get('timestamp', '')
                                if entry_date_str:
                                    try:
                                        # BUGFIX: same module-vs-class fix as above for fromisoformat.
                                        entry_date = datetime.datetime.fromisoformat(entry_date_str.replace('Z', '+00:00'))
                                        # ROBUSTNESS: a 'Z'-suffixed timestamp parses as timezone-aware,
                                        # which cannot be compared with the naive filter datetimes
                                        # (TypeError, not caught by the ValueError handler below).
                                        # Drop tzinfo so the comparison is always naive-vs-naive.
                                        if entry_date.tzinfo is not None:
                                            entry_date = entry_date.replace(tzinfo=None)
                                        if start_datetime and entry_date < start_datetime:
                                            continue
                                        if end_datetime and entry_date > end_datetime:
                                            continue
                                    except ValueError:
                                        logger.warning(f"无效的时间戳格式: {entry_date_str}")

                                # Attach the source file path for traceability.
                                entry['file_path'] = file_path
                                all_history.append(entry)
                        except Exception as e:
                            # A single corrupt file must not abort the whole listing.
                            logger.error(f"读取历史文件 {file_path} 时出错: {str(e)}")

            # Paginate the filtered result.
            total_count = len(all_history)
            total_pages = (total_count + page_size - 1) // page_size
            start_index = (page - 1) * page_size
            end_index = start_index + page_size
            paginated_history = all_history[start_index:end_index]

            logger.info(f"获取上传历史记录成功，共 {total_count} 条记录，当前显示第 {page} 页")

            return {
                "success": True,
                "data": paginated_history,
                "total_count": total_count,
                "total_pages": total_pages,
                "current_page": page,
                "page_size": page_size
            }
        except Exception as e:
            logger.error(f"获取上传历史记录时出错: {str(e)}")
            return {
                "success": False,
                "message": f"获取上传历史记录时出错: {str(e)}"
            }


    @app.get("/history")
    async def get_history_endpoint(
        page: int = 1,
        page_size: int = 20,
        upload_type: str = None,
        category: str = None,
        start_date: str = None,
        end_date: str = None
    ):
        """获取上传历史记录的API端点（通用入口）

        Args:
            page: 当前页码，默认为1
            page_size: 每页记录数，默认为20
            upload_type: 可选的上传类型过滤（doc, text, image, weburl, weburl_batch）
            category: 可选的分类过滤
            start_date: 可选的开始日期过滤（格式：YYYY-MM-DD）
            end_date: 可选的结束日期过滤（格式：YYYY-MM-DD）

        Returns:
            JSONResponse: 包含历史记录列表、总页数、当前页码等信息的响应
        """
        try:
            result = get_upload_history(page, page_size, upload_type, category, start_date, end_date)
            if result["success"]:
                return JSONResponse(content=result, status_code=200)
            else:
                return JSONResponse(content=result, status_code=400)
        except Exception as e:
            logger.error(f"处理获取上传历史记录请求时出错: {str(e)}")
            raise HTTPException(status_code=500, detail=f"处理获取上传历史记录请求时出错: {str(e)}")

    @app.get("/history/upload")
    async def get_upload_history_endpoint(
        page: int = 1,
        page_size: int = 20,
        upload_type: str = None,
        category: str = None,
        start_date: str = None,
        end_date: str = None
    ):
        """获取上传历史记录的API端点

        Args:
            page: 当前页码，默认为1
            page_size: 每页记录数，默认为20
            upload_type: 可选的上传类型过滤（doc, text, image, weburl, weburl_batch）
            category: 可选的分类过滤
            start_date: 可选的开始日期过滤（格式：YYYY-MM-DD）
            end_date: 可选的结束日期过滤（格式：YYYY-MM-DD）

        Returns:
            JSONResponse: 包含历史记录列表、总页数、当前页码等信息的响应
        """
        try:
            result = get_upload_history(page, page_size, upload_type, category, start_date, end_date)
            if result["success"]:
                return JSONResponse(content=result, status_code=200)
            else:
                return JSONResponse(content=result, status_code=400)
        except Exception as e:
            logger.error(f"处理获取上传历史记录请求时出错: {str(e)}")
            raise HTTPException(status_code=500, detail=f"处理获取上传历史记录请求时出错: {str(e)}")
            