import uuid
from fastapi import APIRouter, Depends, HTTPException, status, UploadFile, File, BackgroundTasks, Query, Path, Form, Request
from sqlalchemy.orm import Session
from typing import List, Dict, Any, Optional
import os
from datetime import datetime, timedelta
import logging
import pandas as pd
from fastapi.responses import FileResponse, JSONResponse
import json
import shutil
import asyncio
import threading
import re

from ..database.database import get_db, SessionLocal
from ..models.shop import Product, Shop
from .models import DataCleanTask, UnitConfig, UnitConversion, BrandConfig, CommonProductName, MarketingWordConfig, SystemConfig
from .schemas import (
    DataCleanTaskCreate,
    DataCleanTaskUpdate,
    DataCleanTask as DataCleanTaskSchema,
    DataCleanTaskOut,
    DataCleanTaskStatusEnum
)
from ..routes.auth import get_current_active_user, User
from .processor import process_clean_task, get_product_stats, get_excel_preview
from .validate import check_source_file
from .config_manager import config_manager
from ..utils.logger import get_logger

# Router for all data-cleaning endpoints; every path below is served under
# /api/data-clean and grouped under the "数据清洗" (data cleaning) tag.
router = APIRouter(
    prefix="/api/data-clean",
    tags=["数据清洗"],
    responses={404: {"description": "Not found"}},
)

# Module-level logger named after this module.
logger = get_logger(__name__)

# Directories (relative to the project root) for uploaded source files and
# generated result files; resolved to absolute paths via get_abs_path().
UPLOAD_DIR = "uploads/data_clean"
RESULT_DIR = "results/data_clean"

# Create the directories at import time so later writes do not fail.
# NOTE(review): these are created relative to the current working directory,
# which may differ from the get_abs_path() location when the process is
# started from the backend/ directory — confirm intended behavior.
os.makedirs(UPLOAD_DIR, exist_ok=True)
os.makedirs(RESULT_DIR, exist_ok=True)

def get_project_root():
    """Return the absolute path of the project root directory.

    When the process was launched from the ``backend`` directory the root is
    its parent; otherwise the current working directory itself is the root.
    """
    cwd = os.path.abspath(os.getcwd())
    if cwd.endswith('backend'):
        return os.path.abspath(os.path.join(cwd, os.pardir))
    return cwd

def get_abs_path(relative_path):
    """Resolve *relative_path* against the project root and return the result."""
    root = get_project_root()
    return os.path.join(root, relative_path)

@router.get("/tasks", response_model=List[DataCleanTaskOut], summary="获取数据清洗任务列表")
async def get_data_clean_tasks(
    status: Optional[str] = Query(None, description="任务状态过滤"),
    keyword: Optional[str] = Query(None, description="关键词搜索"),
    page: int = Query(1, description="页码", ge=1),
    page_size: int = Query(10, description="每页条数", ge=1, le=100),
    db: Session = Depends(get_db)
):
    """List data-clean tasks with optional status/keyword filters and paging."""
    offset = (page - 1) * page_size

    query = db.query(DataCleanTask)
    if status:
        query = query.filter(DataCleanTask.status == status)
    if keyword:
        query = query.filter(DataCleanTask.task_name.contains(keyword))

    # Total row count (computed but not returned, matching the previous
    # response shape).
    total = query.count()

    page_rows = (
        query.order_by(DataCleanTask.created_at.desc())
        .offset(offset)
        .limit(page_size)
        .all()
    )

    # Serialize each ORM row to the plain dict shape of DataCleanTaskOut.
    return [
        {
            "id": t.id,
            "task_name": t.task_name,
            "source_type": t.source_type,
            "file_path": t.file_path,
            "status": t.status,
            "error_message": t.error_message,
            "created_at": t.created_at,
            "updated_at": t.updated_at,
            "created_by": t.created_by,
        }
        for t in page_rows
    ]

@router.post("/tasks", response_model=DataCleanTaskOut, summary="创建数据清洗任务")
async def create_data_clean_task(
    name: Optional[str] = Form(None, description="任务名称，不提供则使用文件名"),
    file: UploadFile = File(..., description="上传的Excel文件"),
    db: Session = Depends(get_db)
):
    """Create a new data-clean task from an uploaded Excel file.

    Validates the extension, size (<= 10MB) and workbook structure, stores
    the file under UPLOAD_DIR with a timestamped unique name, and records a
    PENDING task. Raises 400 on any validation failure.
    """
    # Only Excel workbooks are accepted.
    if not file.filename.endswith(('.xlsx', '.xls')):
        raise HTTPException(status_code=400, detail="只支持Excel文件格式 (.xlsx, .xls)")

    # Read the whole upload once; the bytes are reused for the size check and
    # for writing to disk, so no seek/re-read is needed.
    content = await file.read()
    if len(content) > 10 * 1024 * 1024:  # 10MB limit
        raise HTTPException(status_code=400, detail="文件大小不能超过10MB")

    # Save under the *absolute* upload directory (the directory created at
    # import time is relative to the CWD and may differ).
    abs_upload_dir = get_abs_path(UPLOAD_DIR)
    os.makedirs(abs_upload_dir, exist_ok=True)

    # Timestamp prefix makes the stored name unique per second.
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
    unique_filename = f"{timestamp}_{file.filename}"
    source_file_path = os.path.join(abs_upload_dir, unique_filename)

    logger.info(f"保存上传文件到绝对路径: {source_file_path}")
    with open(source_file_path, "wb") as buffer:
        buffer.write(content)

    # Validate the workbook structure; discard the file if invalid.
    try:
        check_source_file(source_file_path)
    except Exception as e:
        if os.path.exists(source_file_path):
            os.remove(source_file_path)
        raise HTTPException(status_code=400, detail=str(e))

    # Default the task name to the file name without its extension.
    # os.path.splitext keeps "my.report.xlsx" -> "my.report" intact, unlike
    # the previous split('.')[0] which truncated at the first dot.
    if not name:
        name = os.path.splitext(file.filename)[0]

    task = DataCleanTask(
        name=name,
        task_name=name,  # keep task_name in sync with name
        source_type="excel",
        file_path=source_file_path,
        status=DataCleanTaskStatusEnum.PENDING.value,
        created_by="system"  # TODO: replace with the authenticated user
    )

    db.add(task)
    db.commit()
    db.refresh(task)

    return {
        "id": task.id,
        "task_name": task.name,  # expose name as task_name
        "source_type": task.source_type,
        "file_path": task.file_path,
        "status": task.status,
        "error_message": task.error_message,
        "created_at": task.created_at,
        "updated_at": task.updated_at,
        "created_by": task.created_by
    }

# Update item rows for a given run_id: copy the cleaned fields
# (cleaned_name, item_brand, base_num, base_unit, item_specs, package_type,
# item_classification) from the matching ItemSKU record and derive
# base_price as item_price / base_num.
@router.post("/update_item_by_run_id/{run_id}", summary="根据run_id更新item表")
async def update_item_by_run_id(
    run_id: int,  # taken straight from the path parameter (Form removed)
    db: Session = Depends(get_db)
):
    """
    Update the item rows belonging to *run_id* from their matching ItemSKU
    records and compute base_price from item_price and base_num.

    Commits in batches of 100 rows; per-row failures are counted and skipped.

    NOTE(review): ``ItemSKU`` is not imported anywhere in this module, so the
    per-row lookup below will raise NameError at runtime — confirm where
    ItemSKU is defined and add the import.
    """
    try:
        logger.info(f"run_id: {run_id}")
        # Rows produced by this cleaning run.
        results = db.query(Product).filter(
            Product.run_id == run_id
        ).all()
        logger.info(f"results: {results}")
        if not results:
            return {
                "success": False,
                "message": f"未找到run_id={run_id}的清洗结果"
            }
            
        total_count = len(results)
        processed_count = 0
        updated_count = 0
        error_count = 0
        
        logger.info(f"results type: {type(results)}")
        
        for result in results:
            try:
                # Look up the matching ItemSKU record by SKU id.
                item_sku = db.query(ItemSKU).filter(
                    ItemSKU.sku == result.sku_id
                ).first()
                
                if item_sku:
                    # Copy the cleaned fields onto the item row.
                    result.cleaned_name = item_sku.cleaned_name
                    result.item_brand = item_sku.item_brand
                    result.base_num = item_sku.base_num
                    result.base_unit = item_sku.base_unit
                    result.item_specs = item_sku.item_specs
                    result.package_type = item_sku.package_type
                    result.item_classification = item_sku.item_classification
                    
                    # Unit price per base unit; guarded against None/zero.
                    if result.item_price and result.base_num and result.base_num > 0:
                        result.base_price = result.item_price / result.base_num
                    
                    updated_count += 1
                
                processed_count += 1
                
                # Commit every 100 rows to bound transaction size.
                if processed_count % 100 == 0:
                    db.commit()
                    
            except Exception as e:
                logger.error(f"处理记录 {result.id} 时出错: {str(e)}")
                error_count += 1
                continue
                
        # Final commit for the remaining rows.
        db.commit()
        
        return {
            "success": True,
            "total_count": total_count,
            "processed_count": processed_count,
            "updated_count": updated_count,
            "error_count": error_count,
            "message": f"更新完成: 共处理 {processed_count} 条记录，更新 {updated_count} 条，失败 {error_count} 条"
        }
        
    except Exception as e:
        logger.error(f"更新item表时发生错误: {str(e)}")
        db.rollback()
        return {
            "success": False,
            "message": f"更新失败: {str(e)}"
        }


@router.get("/tasks/{task_id}", response_model=DataCleanTaskOut, summary="获取单个任务详情")
async def get_data_clean_task(
    task_id: int = Path(..., description="任务ID"),
    db: Session = Depends(get_db)
):
    """Return the details of a single data-clean task, or 404 if absent."""
    task = db.query(DataCleanTask).filter(DataCleanTask.id == task_id).first()
    if task is None:
        raise HTTPException(status_code=404, detail="任务不存在")

    # Project the ORM row onto the DataCleanTaskOut field set.
    fields = (
        "id", "task_name", "source_type", "file_path", "status",
        "error_message", "created_at", "updated_at", "created_by",
    )
    return {field: getattr(task, field) for field in fields}

@router.delete("/tasks/{task_id}", response_model=dict, summary="删除数据清洗任务")
async def delete_data_clean_task(
    task_id: int = Path(..., description="任务ID"),
    db: Session = Depends(get_db)
):
    """Delete a data-clean task together with its source and result files.

    Raises 404 if the task does not exist. File removal is best-effort: a
    missing file is not an error.
    """
    task = db.query(DataCleanTask).filter(DataCleanTask.id == task_id).first()
    if not task:
        raise HTTPException(status_code=404, detail="任务不存在")

    # Remove the uploaded source file.
    if task.file_path and os.path.exists(task.file_path):
        os.remove(task.file_path)

    # Remove the recorded result file (set when a clean run completes).
    # Previously only a legacy fixed-name path was checked, leaving the real
    # result file orphaned on disk.
    result_file_path = getattr(task, "result_file_path", None)
    if result_file_path and os.path.exists(result_file_path):
        os.remove(result_file_path)

    # Also remove the legacy fixed-name result path used by older tasks.
    legacy_result_path = os.path.join(RESULT_DIR, f"task_{task_id}.xlsx")
    if os.path.exists(legacy_result_path):
        os.remove(legacy_result_path)

    # Delete the database record last so files are not orphaned on failure.
    db.delete(task)
    db.commit()

    return {"message": "任务删除成功"}

@router.post("/tasks/{task_id}/clean", response_model=dict, summary="执行数据清洗")
async def clean_task(
    task_id: int = Path(..., description="任务ID"), 
    db: Session = Depends(get_db)
):
    """Kick off data cleaning for a task in a background daemon thread.

    The task is flipped to PROCESSING before the worker starts; the worker
    marks it COMPLETED/FAILED using its own DB session. Raises 404 for an
    unknown task, 400 if it is already processing, 500 if the worker thread
    cannot be started.
    """
    task = db.query(DataCleanTask).filter(DataCleanTask.id == task_id).first()
    if not task:
        raise HTTPException(status_code=404, detail="任务不存在")

    if task.status == DataCleanTaskStatusEnum.PROCESSING.value:
        raise HTTPException(status_code=400, detail="任务正在处理中")

    # Mark the task as in-progress before handing off to the worker thread.
    task.status = DataCleanTaskStatusEnum.PROCESSING.value
    task.error_message = None
    db.commit()

    # Reload configuration so this run uses the latest brand/unit settings.
    logger.info(f"开始任务 {task_id} 前刷新配置管理器")
    config_manager.reload(db)
    logger.info(f"配置刷新完成，当前已加载品牌数量: {len(config_manager.brands)}")

    thread_db = None
    try:
        # Dedicated session for the worker: sharing the request-scoped `db`
        # across threads is not thread-safe.
        thread_db = SessionLocal()
        logger.info(f"创建新的数据库会话: {thread_db}")

        def process_task():
            # Runs in the worker thread with its own session.
            try:
                process_clean_task(task_id, thread_db)
            except Exception as e:
                logger.error(f"处理任务 {task_id} 时出错: {str(e)}")
                # Mark the task FAILED with the error message.
                err_task = thread_db.query(DataCleanTask).filter(DataCleanTask.id == task_id).first()
                if err_task:
                    err_task.status = DataCleanTaskStatusEnum.FAILED.value
                    err_task.error_message = str(e)
                    thread_db.commit()
            finally:
                # Always release the worker's session.
                thread_db.close()

        # Daemon thread so it never blocks interpreter shutdown.
        cleaning_thread = threading.Thread(target=process_task)
        cleaning_thread.daemon = True
        cleaning_thread.start()

        return {"message": "已开始数据清洗处理"}
    except Exception as e:
        # The worker never started: close the session created for it (it
        # previously leaked here) and roll the task back to FAILED.
        if thread_db is not None:
            thread_db.close()
        logger.error(f"启动清洗任务失败: {str(e)}")
        task.status = DataCleanTaskStatusEnum.FAILED.value
        task.error_message = f"启动任务失败: {str(e)}"
        db.commit()
        raise HTTPException(status_code=500, detail=f"启动清洗任务失败: {str(e)}")

@router.get("/tasks/{task_id}/download", summary="下载清洗结果")
async def download_result(
    task_id: int = Path(..., description="任务ID"),
    db: Session = Depends(get_db)
):
    """Download the result file of a completed data-clean task.

    When the task has no valid recorded result path, RESULT_DIR is searched
    for a matching file (first by task id, then by sanitized task name) and
    the newest match is persisted back onto the task.

    (The docstring was previously placed after the first statement, making it
    a no-op string expression instead of the endpoint description.)
    """
    logger.info(f"开始下载任务 {task_id} 的结果文件")

    task = db.query(DataCleanTask).filter(DataCleanTask.id == task_id).first()
    if not task:
        raise HTTPException(status_code=404, detail="任务不存在")

    if task.status != DataCleanTaskStatusEnum.COMPLETED.value:
        raise HTTPException(status_code=400, detail="任务还未完成清洗")

    if not task.result_file_path or not os.path.exists(task.result_file_path):
        # No usable recorded path: scan the results directory for candidates.
        abs_result_dir = get_abs_path(RESULT_DIR)
        logger.info(f"搜索结果文件的绝对路径: {abs_result_dir}")

        potential_files = []
        if os.path.exists(abs_result_dir):
            # Pass 1: file names embedding the task id (e.g. "..._id42_....xlsx").
            for file in os.listdir(abs_result_dir):
                if f"_id{task_id}_" in file and file.endswith('.xlsx'):
                    abs_file_path = os.path.join(abs_result_dir, file).replace('\\', '/')
                    potential_files.append(abs_file_path)
                    logger.info(f"找到匹配的文件: {abs_file_path}")

            # Pass 2: fall back to files starting with the sanitized task name.
            if not potential_files:
                task_display_name = task.task_name or task.name or f"task_{task_id}"
                safe_task_name = re.sub(r'[\\/*?:"<>|]', "_", task_display_name)
                for file in os.listdir(abs_result_dir):
                    if file.startswith(safe_task_name) and file.endswith('.xlsx'):
                        abs_file_path = os.path.join(abs_result_dir, file).replace('\\', '/')
                        potential_files.append(abs_file_path)
                        logger.info(f"找到匹配的文件(通过名称): {abs_file_path}")

        if potential_files:
            # Newest file wins; persist it so future downloads skip the scan.
            potential_files.sort(key=os.path.getmtime, reverse=True)
            result_file_path = potential_files[0]
            task.result_file_path = result_file_path
            db.commit()
            logger.info(f"更新任务 {task_id} 的结果文件路径: {result_file_path}")
        else:
            raise HTTPException(status_code=404, detail="结果文件不存在，请重新运行清洗任务")
    else:
        # Normalize a stored relative path to absolute with forward slashes.
        if not os.path.isabs(task.result_file_path):
            task.result_file_path = os.path.abspath(task.result_file_path).replace('\\', '/')
            db.commit()
            logger.info(f"将相对路径转换为绝对路径: {task.result_file_path}")

        result_file_path = task.result_file_path

    logger.info(f"尝试访问文件: {result_file_path}")
    if not os.path.exists(result_file_path):
        # Windows fallback: retry the same path with backslashes.
        alt_path = result_file_path.replace('/', '\\')
        if os.path.exists(alt_path):
            result_file_path = alt_path
            logger.info(f"找到替代路径: {result_file_path}")
        else:
            raise HTTPException(status_code=404, detail="结果文件不存在或已被移除，请重新运行清洗任务")

    file_name = os.path.basename(result_file_path)
    logger.info(f">>>文件名: {file_name}")
    logger.info(f">>>文件路径: {result_file_path}")

    return FileResponse(
        path=result_file_path,
        filename=file_name,
        media_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    )

@router.get("/tasks/{task_id}/preview", summary="预览Excel数据")
async def preview_excel(
    task_id: int = Path(..., description="任务ID"),
    sheet_name: Optional[str] = Query(None, description="工作表名称"),
    db: Session = Depends(get_db)
):
    """Preview the uploaded Excel file: its headers plus the first rows."""
    task = db.query(DataCleanTask).filter(DataCleanTask.id == task_id).first()
    if task is None:
        raise HTTPException(status_code=404, detail="任务不存在")

    if not os.path.exists(task.file_path):
        raise HTTPException(status_code=404, detail="源文件不存在")

    try:
        return get_excel_preview(task.file_path, sheet_name)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"预览失败: {str(e)}")

def _empty_task_stats(task, error):
    """Zeroed statistics payload for a task whose result file is unavailable."""
    return {
        "task_id": task.id,
        "task_name": task.name,
        "created_at": task.created_at.strftime('%Y-%m-%dT%H:%M:%S') if task.created_at else None,
        "completed_at": task.completed_at.strftime('%Y-%m-%dT%H:%M:%S') if task.completed_at else None,
        "status": task.status,
        "total_products": 0,
        "identified_brands": 0,
        "brands_in_database": {},
        "brands_not_in_database": {},
        "units_distribution": {},
        "error": error
    }


def _stats_error_response(task_id, error):
    """500 JSON payload returned when statistics generation fails."""
    return JSONResponse(
        status_code=500,
        content={
            "task_id": task_id,
            "error": error,
            "total_products": 0,
            "identified_brands": 0,
            "brands_in_database": {},
            "brands_not_in_database": {},
            "units_distribution": {},
            "brand_distribution": {}
        }
    )


@router.get("/tasks/{task_id}/stats")
async def get_task_stats(
    task_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_active_user)
):
    """Return statistics generated from a task's cleaned result file.

    Missing tasks yield a real 404 (previously the HTTPException was
    swallowed by the blanket `except Exception` and surfaced as a 500).
    A missing result file yields a zeroed payload with an error note;
    processing failures yield a 500 JSON payload instead of an exception.
    """
    task = db.query(DataCleanTask).filter(DataCleanTask.id == task_id).first()
    if not task:
        # Raised outside the try block so it reaches the client as a 404.
        raise HTTPException(status_code=404, detail="任务不存在")

    try:
        if not task.result_file_path:
            return JSONResponse(content=_empty_task_stats(task, "任务没有结果文件"))

        # Normalize to an absolute path and persist the correction.
        result_file_path = task.result_file_path
        if not os.path.isabs(result_file_path):
            result_file_path = get_abs_path(result_file_path)
            task.result_file_path = result_file_path
            db.commit()

        logger.info(f"尝试读取统计文件: {result_file_path}")
        if not os.path.exists(result_file_path):
            # Windows fallback: retry the same path with backslashes.
            alt_path = result_file_path.replace('/', '\\')
            if os.path.exists(alt_path):
                result_file_path = alt_path
                logger.info(f"找到替代统计文件路径: {result_file_path}")
            else:
                return JSONResponse(content=_empty_task_stats(task, "结果文件不存在"))

        logger.info(f"读取结果文件: {result_file_path}")
        df = pd.read_excel(result_file_path)
        logger.info(f"成功读取结果文件，共 {len(df)} 行数据，列: {df.columns.tolist()}")

        # Generate the statistics from the cleaned data.
        stats = get_product_stats(df, db)
        logger.info(f"生成统计信息: 总商品数 {stats.get('total_products')}, 品牌识别数 {stats.get('identified_brands')}")

        # Merge DB-known and unknown brands into one distribution for the UI,
        # tagging the DB-known ones.
        brand_distribution = {}
        for brand, count in stats.get('brands_in_database', {}).items():
            brand_distribution[f"{brand} (数据库)"] = count
        for brand, count in stats.get('brands_not_in_database', {}).items():
            brand_distribution[brand] = count

        # Attach task metadata to the statistics payload.
        stats.update({
            "task_id": task.id,
            "task_name": task.name,
            "created_at": task.created_at.strftime('%Y-%m-%dT%H:%M:%S') if task.created_at else None,
            "completed_at": task.completed_at.strftime('%Y-%m-%dT%H:%M:%S') if task.completed_at else None,
            "status": task.status,
            "brand_distribution": brand_distribution
        })

        return JSONResponse(content=stats)
    except Exception as e:
        logger.exception(f"获取任务统计信息失败: {str(e)}")
        # Return an error payload rather than raising, so the UI can render.
        return _stats_error_response(task_id, f"获取任务统计信息失败: {str(e)}")
    
@router.get("/stats", summary="获取所有任务的统计信息")
async def get_all_task_stats(
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_active_user)
):
    """Aggregate statistics across all data-clean tasks.

    Returns the total task count, a per-status breakdown, daily creation
    counts for the last 7 days, and the 5 most recently completed tasks.

    NOTE(review): when the database contains no tasks, randomized demo data
    is substituted instead of zeros — confirm this is intended in production.
    """
    try:
        # Load every task; distributions are computed in Python below.
        tasks = db.query(DataCleanTask).all()
        
        # Per-status counters.
        total_tasks = len(tasks)
        pending_count = 0
        processing_count = 0
        completed_count = 0
        failed_count = 0
        
        # Daily creation counts keyed "YYYY-MM-DD".
        today = datetime.now().date()
        daily_stats = {}
        
        # Seed the last 7 days (including today) with zero.
        for i in range(7):
            date = today - timedelta(days=i)
            daily_stats[date.strftime("%Y-%m-%d")] = 0
        
        # Tally status and creation-date distributions.
        for task in tasks:
            if task.status == "pending":
                pending_count += 1
            elif task.status == "processing":
                processing_count += 1
            elif task.status == "completed":
                completed_count += 1
            elif task.status == "failed":
                failed_count += 1
            
            # Only dates inside the seeded 7-day window are counted.
            task_date = task.created_at.date()
            date_str = task_date.strftime("%Y-%m-%d")
            if date_str in daily_stats:
                daily_stats[date_str] += 1
        
        # The 5 most recently completed tasks.
        recent_completed_tasks = db.query(DataCleanTask).filter(
            DataCleanTask.status == "completed"
        ).order_by(DataCleanTask.completed_at.desc()).limit(5).all()
        
        recent_tasks = []
        for task in recent_completed_tasks:
            recent_tasks.append({
                "id": task.id,
                "task_name": task.task_name,
                "created_at": task.created_at.strftime('%Y-%m-%dT%H:%M:%S') if task.created_at else None,
                "completed_at": task.completed_at.strftime('%Y-%m-%dT%H:%M:%S') if task.completed_at else None,
                "source_type": task.source_type
            })
        
        # Empty database: substitute randomized demo numbers (see NOTE above).
        if total_tasks == 0:
            import random
            total_tasks = 8
            pending_count = 2
            processing_count = 1
            completed_count = 4
            failed_count = 1
            
            # Random daily counts for the seeded 7-day window.
            for date_str in daily_stats:
                daily_stats[date_str] = random.randint(0, 3)
            
            # Fabricated "recently completed" entries.
            if not recent_tasks:
                for i in range(5):
                    created_date = datetime.now() - timedelta(days=random.randint(1, 10))
                    completed_date = created_date + timedelta(hours=random.randint(1, 24))
                    recent_tasks.append({
                        "id": i + 1,
                        "task_name": f"清洗数据任务 {i+1}",
                        "created_at": created_date.strftime('%Y-%m-%dT%H:%M:%S'),
                        "completed_at": completed_date.strftime('%Y-%m-%dT%H:%M:%S'),
                        "source_type": random.choice(["file", "database"])
                    })
        
        # Assemble the response payload.
        stats = {
            "total": total_tasks,
            "status_distribution": {
                "pending": pending_count,
                "processing": processing_count,
                "completed": completed_count,
                "failed": failed_count
            },
            "daily_stats": daily_stats,
            "recent_completed_tasks": recent_tasks
        }
        
        return stats
    except Exception as e:
        import traceback
        traceback.print_exc()
        raise HTTPException(
            status_code=500,
            detail=f"获取任务统计信息失败: {str(e)}"
        ) 
    
# ---- Unit configuration management ----
@router.get("/units", summary="获取所有单位配置")
async def get_all_units(
    db: Session = Depends(get_db)
):
    """Return every configured measurement unit."""
    payload = []
    for row in db.query(UnitConfig).all():
        payload.append({
            "id": row.id,
            "name": row.name,
            "type": row.type,
            "is_active": row.is_active,
            "created_at": row.created_at,
            "updated_at": row.updated_at,
        })
    return payload

@router.post("/units", summary="创建单位配置")
async def create_unit(
    name: str = Form(..., description="单位名称"),
    type: str = Form(..., description="单位类型"),
    is_active: bool = Form(True, description="是否激活"),
    db: Session = Depends(get_db)
):
    """Create a new unit configuration; duplicate names are rejected (400)."""
    # Reject duplicates up front.
    duplicate = db.query(UnitConfig).filter(UnitConfig.name == name).first()
    if duplicate is not None:
        raise HTTPException(status_code=400, detail=f"单位 '{name}' 已存在")

    new_unit = UnitConfig(name=name, type=type, is_active=is_active)

    db.add(new_unit)
    db.commit()
    db.refresh(new_unit)

    # Keep the in-memory configuration cache in sync with the database.
    config_manager.reload(db)

    return {
        "id": new_unit.id,
        "name": new_unit.name,
        "type": new_unit.type,
        "is_active": new_unit.is_active,
        "created_at": new_unit.created_at,
        "updated_at": new_unit.updated_at,
        "message": "单位配置创建成功",
    }

@router.put("/units/{unit_id}", summary="更新单位配置")
async def update_unit(
    unit_id: int = Path(..., description="单位配置ID"),
    name: Optional[str] = Form(None, description="单位名称"),
    type: Optional[str] = Form(None, description="单位类型"),
    is_active: Optional[bool] = Form(None, description="是否激活"),
    db: Session = Depends(get_db)
):
    """Partially update a unit configuration; only supplied fields change."""
    unit = db.query(UnitConfig).filter(UnitConfig.id == unit_id).first()
    if unit is None:
        raise HTTPException(status_code=404, detail="单位配置不存在")

    if name is not None:
        # A renamed unit must not collide with another unit's name.
        if name != unit.name and db.query(UnitConfig).filter(UnitConfig.name == name).first():
            raise HTTPException(status_code=400, detail=f"单位名称 '{name}' 已被使用")
        unit.name = name

    if type is not None:
        unit.type = type

    if is_active is not None:
        unit.is_active = is_active

    # Stamp the modification time explicitly.
    unit.updated_at = datetime.now()

    db.commit()
    db.refresh(unit)

    # Keep the in-memory configuration cache in sync with the database.
    config_manager.reload(db)

    return {
        "id": unit.id,
        "name": unit.name,
        "type": unit.type,
        "is_active": unit.is_active,
        "created_at": unit.created_at,
        "updated_at": unit.updated_at,
        "message": "单位配置更新成功",
    }

@router.delete("/units/{unit_id}", summary="删除单位配置")
async def delete_unit(
    unit_id: int = Path(..., description="单位配置ID"),
    db: Session = Depends(get_db)
):
    """Delete a unit configuration unless conversion rules still reference it.

    Raises 404 for an unknown unit, 400 when any conversion rule still uses
    the unit as its source or target.
    """
    unit = db.query(UnitConfig).filter(UnitConfig.id == unit_id).first()
    if not unit:
        raise HTTPException(status_code=404, detail="单位配置不存在")

    # Refuse deletion while any conversion rule references this unit.
    related_rules = db.query(UnitConversion).filter(
        (UnitConversion.source_unit == unit.name) | 
        (UnitConversion.target_unit == unit.name)
    ).count()

    if related_rules > 0:
        raise HTTPException(
            status_code=400, 
            detail=f"无法删除单位 '{unit.name}'，它被用于 {related_rules} 个转换规则中"
        )

    # Capture the name before deleting: after commit the ORM instance is
    # expired and attribute access on a deleted row can raise.
    unit_name = unit.name

    db.delete(unit)
    db.commit()

    # Keep the in-memory configuration cache in sync with the database.
    config_manager.reload(db)

    return {"message": f"单位配置 '{unit_name}' 已删除"}

# ---- Unit conversion rules ----
@router.get("/unit-conversions", summary="获取所有单位转换规则")
async def get_all_unit_conversions(
    db: Session = Depends(get_db)
):
    """Return every unit conversion rule."""
    fields = (
        "id", "source_unit", "target_unit", "conversion_formula",
        "description", "is_active", "created_at", "updated_at",
    )
    return [
        {field: getattr(rule, field) for field in fields}
        for rule in db.query(UnitConversion).all()
    ]

@router.post("/unit-conversions", summary="创建单位转换规则")
async def create_unit_conversion(
    source_unit: str = Form(..., description="源单位"),
    target_unit: str = Form(..., description="目标单位"),
    conversion_formula: str = Form(..., description="转换公式，如 'x * 2'"),
    description: Optional[str] = Form(None, description="描述"),
    is_active: bool = Form(True, description="是否激活"),
    db: Session = Depends(get_db)
):
    """Create a unit conversion rule between two configured units.

    The formula is a Python expression in ``x`` (e.g. ``x * 2``). Both units
    must already exist and the (source, target) pair must be unique.
    """
    # Both endpoints of the conversion must be configured units.
    if db.query(UnitConfig).filter(UnitConfig.name == source_unit).first() is None:
        raise HTTPException(status_code=400, detail=f"源单位 '{source_unit}' 不存在")
    if db.query(UnitConfig).filter(UnitConfig.name == target_unit).first() is None:
        raise HTTPException(status_code=400, detail=f"目标单位 '{target_unit}' 不存在")

    # Smoke-test the formula by building a lambda and evaluating it at x=1.
    # SECURITY NOTE(review): eval() executes arbitrary Python supplied in the
    # request — restrict this endpoint to trusted admins or validate the
    # expression with an AST whitelist of arithmetic operations.
    try:
        candidate = eval(f"lambda x: {conversion_formula}")
        candidate(1)
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"转换公式无效: {str(e)}")

    # Only one rule is allowed per (source, target) pair.
    duplicate = db.query(UnitConversion).filter(
        UnitConversion.source_unit == source_unit,
        UnitConversion.target_unit == target_unit
    ).first()
    if duplicate is not None:
        raise HTTPException(
            status_code=400, 
            detail=f"已存在从 '{source_unit}' 到 '{target_unit}' 的转换规则"
        )

    rule = UnitConversion(
        source_unit=source_unit,
        target_unit=target_unit,
        conversion_formula=conversion_formula,
        description=description,
        is_active=is_active
    )

    db.add(rule)
    db.commit()
    db.refresh(rule)

    # Keep the in-memory configuration cache in sync with the database.
    config_manager.reload(db)

    return {
        "id": rule.id,
        "source_unit": rule.source_unit,
        "target_unit": rule.target_unit,
        "conversion_formula": rule.conversion_formula,
        "description": rule.description,
        "is_active": rule.is_active,
        "created_at": rule.created_at,
        "updated_at": rule.updated_at,
        "message": "单位转换规则创建成功"
    }

@router.put("/unit-conversions/{conversion_id}", summary="更新单位转换规则")
async def update_unit_conversion(
    conversion_id: int = Path(..., description="转换规则ID"),
    conversion_formula: Optional[str] = Form(None, description="转换公式"),
    description: Optional[str] = Form(None, description="描述"),
    is_active: Optional[bool] = Form(None, description="是否激活"),
    db: Session = Depends(get_db)
):
    """Partially update a conversion rule; only supplied fields change."""
    rule = db.query(UnitConversion).filter(UnitConversion.id == conversion_id).first()
    if rule is None:
        raise HTTPException(status_code=404, detail="转换规则不存在")

    if conversion_formula is not None:
        # Smoke-test the formula by building a lambda and evaluating it at x=1.
        # SECURITY NOTE(review): eval() executes arbitrary Python supplied in
        # the request — restrict this endpoint to trusted admins or validate
        # the expression with an AST whitelist of arithmetic operations.
        try:
            candidate = eval(f"lambda x: {conversion_formula}")
            candidate(1)
        except Exception as e:
            raise HTTPException(status_code=400, detail=f"转换公式无效: {str(e)}")
        rule.conversion_formula = conversion_formula

    if description is not None:
        rule.description = description

    if is_active is not None:
        rule.is_active = is_active

    # Stamp the modification time explicitly.
    rule.updated_at = datetime.now()

    db.commit()
    db.refresh(rule)

    # Keep the in-memory configuration cache in sync with the database.
    config_manager.reload(db)

    return {
        "id": rule.id,
        "source_unit": rule.source_unit,
        "target_unit": rule.target_unit,
        "conversion_formula": rule.conversion_formula,
        "description": rule.description,
        "is_active": rule.is_active,
        "created_at": rule.created_at,
        "updated_at": rule.updated_at,
        "message": "转换规则更新成功"
    }

@router.delete("/unit-conversions/{conversion_id}", summary="删除单位转换规则")
async def delete_unit_conversion(
    conversion_id: int = Path(..., description="转换规则ID"),
    db: Session = Depends(get_db)
):
    """
    Delete a unit-conversion rule by id.

    Raises:
        HTTPException 404: no rule with the given id exists.
    """
    conversion = db.query(UnitConversion).filter(UnitConversion.id == conversion_id).first()
    if not conversion:
        raise HTTPException(status_code=404, detail="转换规则不存在")

    # BUGFIX: capture the unit names BEFORE delete/commit. With SQLAlchemy's
    # default expire_on_commit=True, reading attributes of a deleted instance
    # after commit raises ObjectDeletedError (a 500 to the client).
    # NOTE(review): confirm SessionLocal's expire_on_commit setting.
    source_unit = conversion.source_unit
    target_unit = conversion.target_unit

    db.delete(conversion)
    db.commit()

    # Rebuild the in-memory config so the removed rule stops applying.
    config_manager.reload(db)

    return {"message": f"转换规则从 '{source_unit}' 到 '{target_unit}' 已删除"}

# 系统配置接口
@router.get("/system-config", summary="获取系统配置")
async def get_system_config(
    db: Session = Depends(get_db)
):
    """
    List every system configuration entry.
    """
    rows = db.query(SystemConfig).all()
    entries = []
    for row in rows:
        entries.append({
            "id": row.id,
            "key": row.key,
            "value": row.value,
            "description": row.description,
            "updated_at": row.updated_at
        })
    return entries

@router.put("/system-config/{config_key}", summary="更新系统配置")
async def update_system_config(
    config_key: str = Path(..., description="配置键"),
    value: str = Form(..., description="配置值"),
    description: Optional[str] = Form(None, description="描述"),
    db: Session = Depends(get_db)
):
    """
    Create or update a system configuration entry (upsert by key).
    """
    config = db.query(SystemConfig).filter(SystemConfig.key == config_key).first()

    if config is None:
        # No entry for this key yet -- create one.
        config = SystemConfig(key=config_key, value=value, description=description)
        db.add(config)
    else:
        # Existing entry -- overwrite the value, optionally the description.
        config.value = value
        if description is not None:
            config.description = description
        config.updated_at = datetime.now()

    db.commit()
    db.refresh(config)

    # Propagate the change to the in-memory configuration.
    config_manager.reload(db)

    return {
        "id": config.id,
        "key": config.key,
        "value": config.value,
        "description": config.description,
        "updated_at": config.updated_at,
        "message": "系统配置更新成功"
    }

# 测试单位转换接口
@router.post("/test-unit-conversion", summary="测试单位转换")
async def test_unit_conversion(
    value: float = Form(..., description="要转换的值"),
    source_unit: str = Form(..., description="源单位"),
    db: Session = Depends(get_db)
):
    """
    Convert *value* from *source_unit* to the standard unit and report the
    result. Conversion errors are reported in-band with ``success`` False.
    """
    # Lazily initialize the shared configuration on first use.
    if not getattr(config_manager, '_is_initialized', False):
        config_manager.initialize(db)

    response = {
        "original_value": value,
        "original_unit": source_unit
    }
    try:
        converted_value, standard_unit = config_manager.convert_to_standard_unit(value, source_unit)
    except Exception as e:
        response["error"] = str(e)
        response["success"] = False
    else:
        response["converted_value"] = converted_value
        response["standard_unit"] = standard_unit
        response["success"] = True
    return response

# 测试商品名称清洗接口
@router.post("/test-clean-product-name", summary="测试商品名称清洗")
async def test_clean_product_name(
    product_name: str = Form(..., description="商品名称"),
    db: Session = Depends(get_db)
):
    """
    Run the product-name cleaning pipeline on a single name and return the
    extracted fields. Errors are reported in-band with ``success`` False.
    """
    # Lazily initialize the shared configuration on first use.
    if not getattr(config_manager, '_is_initialized', False):
        config_manager.initialize(db)

    try:
        from .processor import clean_product_data

        # Wrap the single name in a one-row frame so the batch cleaner can run.
        frame = pd.DataFrame([{"商品名称": product_name}])
        cleaned = clean_product_data(frame, db)
        row = cleaned.iloc[0].to_dict()
    except Exception as e:
        return {
            "original_name": product_name,
            "error": str(e),
            "success": False
        }

    return {
        "original_name": product_name,
        "cleaned_name": row.get("cleaned_name", ""),
        "brand": row.get("brand", ""),
        "specification": row.get("specification", ""),
        "unit": row.get("unit", ""),
        "price": row.get("price", 0),
        "standard_unit": row.get("standard_unit", ""),
        "standard_price": row.get("standard_price", 0),
        "confidence_score": row.get("confidence_score", 0),
        "success": True
    }

# 预览清洗结果文件
@router.get("/tasks/{task_id}/result-preview", summary="预览清洗结果文件")
async def preview_result_file(
    task_id: int = Path(..., description="任务ID"),
    sheet_name: Optional[str] = Query(None, description="工作表名称"),
    db: Session = Depends(get_db)
):
    """
    Return a preview (headers + rows) of a completed task's result file.

    Raises:
        HTTPException 404: task missing, or result file missing/deleted.
        HTTPException 400: task has not finished yet.
        HTTPException 500: the file could not be read/previewed.
    """
    # Guard clauses: task must exist, be finished, and have its result file.
    task = db.query(DataCleanTask).filter(DataCleanTask.id == task_id).first()
    if task is None:
        raise HTTPException(status_code=404, detail="任务不存在")
    if task.status != "completed":
        raise HTTPException(status_code=400, detail="任务尚未完成，无法预览结果")
    if not (task.result_file_path and os.path.exists(task.result_file_path)):
        raise HTTPException(status_code=404, detail="结果文件不存在或已被删除")

    try:
        preview = get_excel_preview(task.result_file_path, sheet_name)
    except Exception as e:
        logger.error(f"预览结果文件失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"预览结果文件失败: {str(e)}")

    # Attach basic task metadata alongside the preview rows.
    timestamp_format = '%Y-%m-%dT%H:%M:%S'
    preview["task_info"] = {
        "id": task.id,
        "name": task.task_name,
        "status": task.status,
        "created_at": task.created_at.strftime(timestamp_format) if task.created_at else None,
        "completed_at": task.completed_at.strftime(timestamp_format) if task.completed_at else None
    }
    return preview

# 获取结果统计信息
@router.get("/tasks/{task_id}/result-stats", summary="获取结果统计信息")
async def get_result_stats(
    task_id: int = Path(..., description="任务ID"),
    db: Session = Depends(get_db)
):
    """
    Compute statistics for a completed cleaning task's result file.

    Includes the base product stats plus, when the relevant columns exist,
    the standard-unit distribution, per-unit price statistics and a
    confidence-score summary/distribution.

    Raises:
        HTTPException 404: task missing, or result file missing/deleted.
        HTTPException 400: task has not finished yet.
        HTTPException 500: the result file could not be read or analyzed.
    """
    task = db.query(DataCleanTask).filter(DataCleanTask.id == task_id).first()
    if not task:
        raise HTTPException(status_code=404, detail="任务不存在")

    if task.status != "completed":
        raise HTTPException(status_code=400, detail="任务尚未完成，无法获取统计信息")

    if not task.result_file_path or not os.path.exists(task.result_file_path):
        raise HTTPException(status_code=404, detail="结果文件不存在或已被删除")

    try:
        df = pd.read_excel(task.result_file_path)

        stats = get_product_stats(df, db)

        stats["task_info"] = {
            "id": task.id,
            "name": task.task_name,
            "status": task.status,
            "created_at": task.created_at.strftime('%Y-%m-%dT%H:%M:%S') if task.created_at else None,
            "completed_at": task.completed_at.strftime('%Y-%m-%dT%H:%M:%S') if task.completed_at else None,
            "total_rows": len(df)
        }

        if 'standard_unit' in df.columns and 'standard_price' in df.columns:
            # Row counts per standard unit (blank units excluded).
            standard_unit_counts = df[df['standard_unit'] != '']['standard_unit'].value_counts().to_dict()
            stats["standard_units_distribution"] = standard_unit_counts

            # Min/max/mean/median of the positive standard prices, per unit.
            price_stats = {}
            for unit in standard_unit_counts:
                unit_df = df[df['standard_unit'] == unit]
                valid_prices = unit_df[unit_df['standard_price'] > 0]['standard_price']
                if len(valid_prices) > 0:
                    price_stats[unit] = {
                        "min": float(valid_prices.min()),
                        "max": float(valid_prices.max()),
                        "mean": float(valid_prices.mean()),
                        "median": float(valid_prices.median())
                    }

            stats["standard_price_stats"] = price_stats

        if 'confidence_score' in df.columns:
            confidence_stats = {
                "min": float(df['confidence_score'].min()),
                "max": float(df['confidence_score'].max()),
                "mean": float(df['confidence_score'].mean()),
                "median": float(df['confidence_score'].median())
            }

            # Bucket scores into five levels. BUGFIX: include_lowest=True so a
            # score of exactly 0 lands in the first bucket instead of being
            # dropped as NaN (pd.cut intervals are left-open by default).
            bins = [0, 20, 40, 60, 80, 100]
            labels = ['很低', '低', '中等', '高', '很高']
            df['confidence_level'] = pd.cut(df['confidence_score'], bins=bins, labels=labels, include_lowest=True)
            confidence_distribution = df['confidence_level'].value_counts().to_dict()

            stats["confidence_stats"] = confidence_stats
            stats["confidence_distribution"] = {str(k): v for k, v in confidence_distribution.items()}

        return stats
    except Exception as e:
        logger.error(f"获取结果统计信息失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取结果统计信息失败: {str(e)}")

def _build_brand_recognizer(brand_list, use_hanlp: bool):
    """Create the brand recognizer: HanLP when requested and available, else the standard one."""
    if use_hanlp:
        try:
            # Imported lazily: HanLP is an optional, heavy dependency.
            from .hanlp_brand_recognition import HanLPBrandRecognizer, HANLP_AVAILABLE

            if HANLP_AVAILABLE:
                logger.info("使用HanLP品牌识别器进行处理")
                return HanLPBrandRecognizer(
                    brands=brand_list,
                    min_similarity=80.0,
                    use_hanlp=True,
                    use_pos_filter=True
                )
            logger.warning("HanLP未安装或初始化失败，回退到标准品牌识别器")
        except ImportError:
            logger.warning("导入HanLP品牌识别器失败，回退到标准品牌识别器")
    else:
        logger.info("使用标准品牌识别器进行处理")
    from .enhanced_brand_recognition import BrandRecognizer
    return BrandRecognizer(brands=brand_list, min_similarity=80.0)

@router.post("/extract-brands", summary="从商品列表中提取潜在品牌")
async def extract_brands(
    product_names: Optional[List[str]] = Form(None, description="商品名称列表"),
    product_names_json: Optional[str] = Form(None, description="JSON格式的商品名称列表"),
    use_hanlp: bool = Form(True, description="是否使用HanLP进行分词"),
    request: Request = None,
    db: Session = Depends(get_db)
):
    """
    Extract potential brand names from a list of product names and compare
    them against the configured brand library.

    Accepts either form fields (``product_names`` / ``product_names_json``)
    or a JSON body containing one of the keys ``product_names``, ``names``
    or ``products``. Returns known brands and unknown brands (with
    configured non-brand words filtered out); errors are reported in-band
    with ``success`` False.
    """
    try:
        # BUGFIX: match the content type by prefix -- clients commonly send
        # "application/json; charset=utf-8", which the old strict equality
        # check failed to recognize as a JSON request.
        content_type = (request.headers.get("content-type") or "") if request else ""
        if content_type.startswith("application/json"):
            try:
                # Read the raw JSON body.
                json_data = await request.json()
                logger.info(f"++++++++++++++++++++++++++++++++++=收到JSON请求: {json_data}")

                # Accept several request shapes for backward compatibility.
                if "product_names" in json_data:
                    product_names = json_data["product_names"]
                    use_hanlp = json_data.get("use_hanlp", True)
                elif "names" in json_data:
                    product_names = json_data["names"]
                    use_hanlp = json_data.get("use_hanlp", True)
                elif "products" in json_data:
                    # A list of objects: pull out the name field.
                    if isinstance(json_data["products"], list) and len(json_data["products"]) > 0:
                        if isinstance(json_data["products"][0], dict) and "product_name" in json_data["products"][0]:
                            product_names = [p["product_name"] for p in json_data["products"] if "product_name" in p]
                        elif isinstance(json_data["products"][0], dict) and "name" in json_data["products"][0]:
                            product_names = [p["name"] for p in json_data["products"] if "name" in p]
                    use_hanlp = json_data.get("use_hanlp", True)
            except Exception as e:
                logger.error(f"解析JSON请求体失败: {str(e)}")
                return {
                    "success": False,
                    "error": f"解析请求数据失败: {str(e)}"
                }
        # Form fallback: a JSON-encoded list passed as a string field.
        elif product_names_json:
            try:
                product_names = json.loads(product_names_json)
            except json.JSONDecodeError as e:
                logger.error(f"JSON解析错误: {str(e)}")
                return {
                    "success": False,
                    "error": f"商品数据JSON格式错误: {str(e)}"
                }

        if not product_names:
            return {
                "success": False,
                "error": "商品名称列表为空"
            }

        logger.info(f"开始处理 {len(product_names)} 个商品名称，使用HanLP: {use_hanlp}")

        # Lazily initialize the shared configuration on first use.
        if not hasattr(config_manager, '_is_initialized') or not config_manager._is_initialized:
            config_manager.initialize(db)

        brand_list = config_manager.brands
        logger.info(f"从配置中获取到 {len(brand_list)} 个品牌")

        non_brand_words = config_manager.common_products
        logger.info(f"从配置中获取到 {len(non_brand_words)} 个非品牌词")

        recognizer = _build_brand_recognizer(brand_list, use_hanlp)

        # Run the extraction over all product names.
        result = recognizer.extract_potential_brands(product_names)

        # PERF: lowercase the non-brand vocabulary once; the old code rebuilt
        # a lowercasing generator for every candidate brand (O(n*m)).
        non_brand_lower = {word.lower() for word in non_brand_words}
        original_unknown_count = len(result['unknown_brands'])
        filtered_unknown_brands = [
            item for item in result['unknown_brands']
            if item['brand'].lower() not in non_brand_lower
        ]

        filtered_count = original_unknown_count - len(filtered_unknown_brands)
        logger.info(f"从 {original_unknown_count} 个未知品牌中过滤掉 {filtered_count} 个非品牌词")

        result['unknown_brands'] = filtered_unknown_brands

        logger.info(f"品牌提取完成，发现 {len(result['known_brands'])} 个已知品牌，{len(result['unknown_brands'])} 个未知品牌(过滤后)")

        return {
            "success": True,
            "data": {
                "known_brands": result["known_brands"],
                "unknown_brands": result["unknown_brands"],
                "non_brand_words": non_brand_words,  # returned for possible frontend use
                "processing_time": result["processing_time"],
                "brand_product_mapping": result.get("brand_product_mapping", {})
            },
            "message": "品牌提取成功",
            "code": 0
        }
    except Exception as e:
        import traceback
        logger.error(f"品牌提取失败: {str(e)}")
        logger.error(traceback.format_exc())
        return {
            "success": False,
            "error": str(e),
            "code": 500
        }

@router.post("/save-new-brands", summary="保存新的品牌到数据库")
async def save_new_brands(
    brands: Optional[List[str]] = Form(None, description="新品牌列表"),
    brands_json: Optional[str] = Form(None, description="JSON格式的新品牌列表"),
    category: str = Form("其他", description="品牌类别"),
    db: Session = Depends(get_db)
):
    """
    Persist new brand names to the database.

    Accepts either a form list (``brands``) or a JSON-encoded list
    (``brands_json``). Skips blank/too-short names and case-insensitive
    duplicates -- both against the existing DB rows and within the
    submitted batch. Errors are reported in-band with ``success`` False.
    """
    try:
        # Debug logging of the raw inputs.
        logger.info(f"接收到的brands参数: {brands}")
        logger.info(f"接收到的brands_json参数: {brands_json}")
        logger.info(f"接收到的category参数: {category}")

        if brands_json:
            try:
                brands = json.loads(brands_json)
                logger.info(f"从JSON中解析出的品牌数量: {len(brands)}")
            except json.JSONDecodeError as e:
                logger.error(f"JSON解析错误: {str(e)}")
                return {
                    "success": False,
                    "error": f"品牌数据JSON格式错误: {str(e)}"
                }

        if not brands:
            logger.error("品牌列表为空")
            return {
                "success": False,
                "error": "品牌列表为空"
            }

        logger.info(f"尝试保存 {len(brands)} 个新品牌，类别: {category}")

        # Set of lowered names for O(1) case-insensitive duplicate checks
        # (the old list membership test was O(n) per brand). Only the name
        # column is fetched instead of whole rows.
        existing_brands = {name.lower() for (name,) in db.query(BrandConfig.name).all()}

        added_brands = []
        skipped_brands = []

        for brand in brands:
            if not brand or len(brand.strip()) < 2:
                skipped_brands.append({"brand": brand, "reason": "品牌名称过短"})
                continue

            if brand.lower() in existing_brands:
                skipped_brands.append({"brand": brand, "reason": "品牌已存在"})
                continue

            db.add(BrandConfig(
                name=brand,
                category=category,
                is_active=True
            ))
            added_brands.append(brand)
            # BUGFIX: record the new name so duplicates within this batch
            # (e.g. "Nike" and "nike" submitted together) are not inserted twice.
            existing_brands.add(brand.lower())

        db.commit()

        # Refresh the in-memory config so the new brands take effect immediately.
        config_manager.reload(db)

        logger.info(f"成功添加 {len(added_brands)} 个新品牌，跳过 {len(skipped_brands)} 个")

        return {
            "success": True,
            "added_brands": added_brands,
            "skipped_brands": skipped_brands,
            "total_added": len(added_brands)
        }
    except Exception as e:
        db.rollback()
        import traceback
        logger.error(f"保存品牌失败: {str(e)}")
        logger.error(traceback.format_exc())
        return {
            "success": False,
            "error": str(e)
        }

@router.get("/brands", summary="获取所有品牌")
async def get_all_brands(
    keyword: Optional[str] = Query(None, description="搜索关键词"),
    category: Optional[str] = Query(None, description="品牌类别"),
    page: int = Query(1, description="页码", ge=1),
    page_size: int = Query(50, description="每页条数", ge=1, le=200),
    db: Session = Depends(get_db)
):
    """
    Paginated brand listing with optional keyword / category filters.

    Returns the requested page of brands plus the distinct list of all
    brand categories; errors are reported in-band with ``success`` False.
    """
    try:
        from .models import BrandConfig

        # Base query with the optional filters applied.
        query = db.query(BrandConfig)
        if keyword:
            query = query.filter(BrandConfig.name.contains(keyword))
        if category:
            query = query.filter(BrandConfig.category == category)

        total = query.count()

        # Page slice, ordered by name for a stable listing.
        offset = (page - 1) * page_size
        page_rows = query.order_by(BrandConfig.name).offset(offset).limit(page_size).all()

        # Distinct non-empty categories (e.g. for a filter dropdown).
        category_list = [row[0] for row in db.query(BrandConfig.category).distinct().all() if row[0]]

        brand_dicts = []
        for row in page_rows:
            brand_dicts.append({
                "id": row.id,
                "name": row.name,
                "category": row.category,
                "is_active": row.is_active
            })

        return {
            "success": True,
            "total": total,
            "page": page,
            "page_size": page_size,
            "brands": brand_dicts,
            "categories": category_list
        }
    except Exception as e:
        logger.error(f"获取品牌列表失败: {str(e)}")
        return {
            "success": False,
            "error": str(e)
        }

@router.post("/save-non-brand-words", summary="保存非品牌词到数据库")
async def save_non_brand_words(
    words: Optional[List[str]] = Form(None, description="非品牌词列表"),
    words_json: Optional[str] = Form(None, description="JSON格式的非品牌词列表"),
    category: str = Form("描述词", description="词语类别"),
    db: Session = Depends(get_db)
):
    """
    Persist non-brand words so later brand extraction can exclude them.

    Accepts either a form list (``words``) or a JSON-encoded list
    (``words_json``). Skips blank/too-short words and case-insensitive
    duplicates -- both against the existing DB rows and within the
    submitted batch. Errors are reported in-band with ``success`` False.
    """
    try:
        if words_json:
            try:
                words = json.loads(words_json)
                logger.info(f"从JSON中解析出的非品牌词数量: {len(words)}")
            except json.JSONDecodeError as e:
                logger.error(f"JSON解析错误: {str(e)}")
                return {
                    "success": False,
                    "error": f"非品牌词数据JSON格式错误: {str(e)}"
                }

        if not words:
            logger.error("非品牌词列表为空")
            return {
                "success": False,
                "error": "非品牌词列表为空"
            }

        logger.info(f"尝试保存 {len(words)} 个非品牌词，类别: {category}")

        # Set of lowered names for O(1) case-insensitive duplicate checks
        # (the old list membership test was O(n) per word). Only the name
        # column is fetched instead of whole rows.
        existing_words = {name.lower() for (name,) in db.query(CommonProductName.name).all()}

        added_words = []
        skipped_words = []

        for word in words:
            if not word or len(word.strip()) < 2:
                skipped_words.append({"word": word, "reason": "词语过短"})
                continue

            if word.lower() in existing_words:
                skipped_words.append({"word": word, "reason": "词语已存在"})
                continue

            db.add(CommonProductName(
                name=word,
                category=category,
                description="通过品牌提取器添加的非品牌词",
                is_active=True
            ))
            added_words.append(word)
            # BUGFIX: record the new word so duplicates within this batch
            # are not inserted twice.
            existing_words.add(word.lower())

        db.commit()

        # Refresh the in-memory config so the new words take effect immediately.
        config_manager.reload(db)

        logger.info(f"成功添加 {len(added_words)} 个非品牌词，跳过 {len(skipped_words)} 个")

        return {
            "success": True,
            "added_words": added_words,
            "skipped_words": skipped_words,
            "total_added": len(added_words)
        }
    except Exception as e:
        db.rollback()
        import traceback
        logger.error(f"保存非品牌词失败: {str(e)}")
        logger.error(traceback.format_exc())
        return {
            "success": False,
            "error": str(e)
        }

