import os
import io
import re
import logging
import shutil
import random
import string
import collections
from typing import List, Dict, Any, Optional, Union
from pathlib import Path
from datetime import datetime
from fastapi import APIRouter, Depends, HTTPException, Query, Form, File, UploadFile, Path, BackgroundTasks
from fastapi.responses import FileResponse, JSONResponse
from PIL import Image

# 导入YAML的FullLoader，允许加载Python对象如元组
import yaml
from yaml import SafeLoader, FullLoader

from app.core.config import settings
from app.models.product import ProductResponse, ProductDetail
from app.utils.file_utils import FileManager
from app.utils.ocr_utils import OCRClient
from app.utils.llm_utils import LLMProcessor
from app.utils.ai_utils import AIClient
from app.utils.document_utils import DocumentProcessor
from langchain_huggingface import HuggingFaceEmbeddings

router = APIRouter()


@router.get("/products", response_model=List[ProductResponse])
async def get_products():
    """Return the full list of products known to the file store."""
    return await FileManager.get_all_products()


@router.get("/products/{product_id}", response_model=ProductDetail)
async def get_product(product_id: str):
    """Return full detail for one product, or 404 if it does not exist."""
    detail = await FileManager.get_product_detail(product_id)

    # A missing product is reported via an "error" key, not an exception.
    if "error" in detail:
        raise HTTPException(status_code=404, detail=detail["error"])

    return detail


@router.post("/products", response_model=ProductResponse)
async def create_product(name: str = Form(...)):
    """Create a new product folder and return its initial metadata."""
    folder = await FileManager.create_product_folder(name)

    # The folder's basename doubles as the product ID.
    new_id = os.path.basename(folder)

    return {
        "id": new_id,
        "name": name,
        "has_analysis": False,
        "image_count": 0,
        "created_time": os.path.getctime(folder)
    }


@router.post("/products/{product_id}/files")
async def upload_product_files(
    product_id: str,
    files: List[UploadFile] = File(...),
    background_tasks: BackgroundTasks = None
):
    """Upload files for a product; multiple file types are supported.

    Images are scheduled for OCR and other documents for text extraction
    as background tasks. A file that fails validation or saving is logged
    and skipped so one bad file does not abort the whole upload; a 400 is
    returned only when no file was accepted at all.
    """
    try:
        # Ensure the product exists before accepting uploads.
        product = await FileManager.get_product_detail(product_id)
        if "error" in product:
            logging.error(f"产品不存在: {product_id}, 错误: {product['error']}")
            raise HTTPException(status_code=404, detail=product["error"])

        # Uploads go into the product's existing folder.
        folder_path = product["folder_path"]
        
        # Reject empty upload requests.
        if not files or len(files) == 0:
            logging.error(f"上传文件为空: product_id={product_id}")
            raise HTTPException(status_code=400, detail="没有上传文件")
            
        uploaded_files = []
        
        # Process each file independently.
        for file in files:
            try:
                # Validate the extension against the configured whitelist.
                file_ext = os.path.splitext(file.filename)[1].lower()
                logging.info(f"正在处理文件: {file.filename}, 类型: {file_ext}")
                
                if file_ext not in settings.ALLOWED_FILE_EXTENSIONS:
                    logging.warning(f"不支持的文件类型: {file.filename}, 类型: {file_ext}")
                    # NOTE(review): this HTTPException is caught by the
                    # per-file handler below, so an unsupported type skips
                    # the file rather than failing the whole request.
                    raise HTTPException(
                        status_code=400,
                        detail=f"不支持的文件类型: {file_ext}"
                    )
                
                # Persist the upload to disk.
                file_path = await FileManager.save_uploaded_file(file, folder_path)
                logging.info(f"文件已保存: {file.filename} -> {file_path}")
                uploaded_files.append(os.path.basename(file_path))
                
                # Choose the extraction pipeline by file type.
                if file_ext in settings.IMAGE_EXTENSIONS:
                    # Images go through OCR.
                    if background_tasks:
                        logging.info(f"添加OCR处理任务: {file_path}")
                        background_tasks.add_task(OCRClient.process_image, file_path)
                else:
                    # Everything else goes through the document processor.
                    if background_tasks:
                        logging.info(f"添加文档处理任务: {file_path}")
                        background_tasks.add_task(DocumentProcessor.process_document, file_path)
            except Exception as file_error:
                logging.error(f"处理文件失败: {file.filename}, 错误: {str(file_error)}")
                # Keep going: one failed file must not abort the upload.
                continue
        
        if not uploaded_files:
            logging.error(f"没有成功上传任何文件: product_id={product_id}")
            raise HTTPException(status_code=400, detail="没有成功上传任何文件")
            
        return {
            "status": "success",
            "message": "文件上传成功，正在处理中",
            "files": uploaded_files
        }
    except HTTPException as http_error:
        # Re-raise deliberate HTTP errors untouched.
        raise http_error
    except Exception as e:
        logging.error(f"上传文件失败: product_id={product_id}, 错误: {str(e)}")
        raise HTTPException(status_code=500, detail=f"上传文件失败: {str(e)}")


@router.get("/products/{product_id}/files")
async def get_product_files(product_id: str):
    """List every file (with metadata) stored for a product.

    Raises:
        HTTPException: 404 when the product does not exist; 500 on
            unexpected failures.
    """
    try:
        product = await FileManager.get_product_detail(product_id)
        # Bug fix: get_product_detail signals a missing product via an
        # "error" key, not a falsy value, so `if not product` never fired.
        if "error" in product:
            raise HTTPException(status_code=404, detail=product["error"])

        return await FileManager.get_product_files(product_id)
    except HTTPException:
        # Bug fix: the broad handler below used to rewrap the 404 as a 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/products/{product_id}/files/{file_id}/content")
async def get_file_content(product_id: str, file_id: str):
    """Return the extracted text content of a single product file.

    Raises:
        HTTPException: 404 when the content does not exist; 500 on
            unexpected failures.
    """
    try:
        content = await FileManager.get_file_content(product_id, file_id)
        if not content:
            raise HTTPException(status_code=404, detail="文件内容不存在")
        return content
    except HTTPException:
        # Bug fix: the broad handler below used to convert the deliberate
        # 404 above into a 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/products/{product_id}/image-texts", response_model=List[Dict[str, Any]])
async def get_product_image_texts(product_id: str):
    """
    Return the OCR text extracted from every image of a product.

    Args:
        product_id (str): Product ID.

    Returns:
        List[Dict[str, Any]]: One entry per image with its OCR text.
    """
    # NOTE(review): unlike every other FileManager call in this module,
    # this one is not awaited — confirm get_product_image_texts is
    # synchronous, otherwise this returns an un-awaited coroutine.
    image_texts = FileManager.get_product_image_texts(product_id)
    return image_texts


@router.post("/products/{product_id}/analyze", response_model=Dict)
async def analyze_product(product_id: str, background_tasks: BackgroundTasks):
    """Extract text from every product file (if needed) and run AI analysis.

    Reuses previously extracted texts cached in analysis_result.yaml and
    only runs OCR / document extraction for files without cached content.

    Raises:
        HTTPException: 404 when the product does not exist, 400 when there
            is nothing to analyze, 500 when extraction or AI analysis fails.
    """
    try:
        # Ensure the product exists.
        product = await FileManager.get_product_detail(product_id)
        if "error" in product:
            raise HTTPException(status_code=404, detail=product["error"])

        # Cached analysis / extracted-text file for this product.
        result_file = os.path.join(product["folder_path"], "analysis_result.yaml")

        # Load previously extracted texts so we only process new files.
        existing_texts = {}
        if os.path.exists(result_file):
            with open(result_file, 'r', encoding='utf-8') as f:
                data = yaml.safe_load(f) or {}
                existing_texts = data.get("image_texts", {})

        # Collect analyzable files; note which still need extraction.
        all_files = []
        missing_files = []
        for filename in os.listdir(product["folder_path"]):
            file_path = os.path.join(product["folder_path"], filename)
            if not os.path.isfile(file_path):
                continue
            file_ext = os.path.splitext(filename)[1].lower()
            if file_ext in settings.IMAGE_EXTENSIONS or file_ext in ('.pdf', '.docx', '.doc', '.txt'):
                all_files.append(filename)
                # Empty or absent cached text means the file needs extraction.
                if not existing_texts.get(filename):
                    missing_files.append(file_path)

        if not all_files:
            raise HTTPException(status_code=400, detail="没有可分析的文件")

        # Extract content for files that have none cached yet.
        if missing_files:
            for file_path in missing_files:
                file_ext = os.path.splitext(file_path)[1].lower()
                if file_ext in settings.IMAGE_EXTENSIONS:
                    # Images go through OCR.
                    text_content = await OCRClient.process_image(file_path)
                else:
                    # Other documents go through the document processor.
                    doc_result = await DocumentProcessor.process_document(file_path)
                    text_content = doc_result.get("content", "")

                existing_texts[os.path.basename(file_path)] = text_content

            # Persist the new texts, keeping the existing analysis result.
            await FileManager.save_analysis_result(
                product["folder_path"],
                product.get("analysis_result", ""),
                existing_texts
            )

        # Gather every non-empty text for the AI prompt.
        analysis_texts = [existing_texts[f] for f in all_files if existing_texts.get(f)]

        if not analysis_texts:
            raise HTTPException(status_code=400, detail="没有可用的文本内容进行分析")

        # Run the AI analysis; only AI failures get the "AI分析失败" label.
        try:
            analysis_result = await LLMProcessor.analyze_product("\n".join(analysis_texts))
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"AI分析失败: {str(e)}")

        # Persist the analysis result alongside the extracted texts.
        await FileManager.save_analysis_result(
            product["folder_path"],
            analysis_result,
            existing_texts
        )

        # Refresh the vector store in the background.
        background_tasks.add_task(update_product_vectors, product_id)

        return {
            "status": "success",
            "message": "分析完成",
            "analyzed_files": len(all_files),
            "extracted_texts": len(analysis_texts)
        }
    except HTTPException:
        # Bug fix: the broad handler below used to rewrap the deliberate
        # 404/400 responses above as generic 500s.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/products/{product_id}/images/{image_name}/extract", response_model=Dict)
async def extract_image_text(
    product_id: str, 
    image_name: str = Path(..., description="图片文件名")
):
    """
    Run OCR on a single product image and persist the extracted text.

    Args:
        product_id: Product ID.
        image_name: Image file name.

    Returns:
        Dict: Status, image name, and the extracted text.

    Raises:
        HTTPException: 404 when the product or image is missing; 500 when
            OCR or persisting the result fails.
    """
    # Ensure the product exists.
    product = await FileManager.get_product_detail(product_id)
    if "error" in product:
        raise HTTPException(status_code=404, detail=product["error"])

    # Ensure the requested image belongs to the product.
    if image_name not in product["images"]:
        raise HTTPException(status_code=404, detail="图片不存在")

    image_path = os.path.join(product["folder_path"], image_name)

    try:
        # Run OCR on the image.
        text_content = await OCRClient.process_image(image_path)

        # Persist the OCR result alongside any existing ones.
        # (A previous version also read analysis_result.yaml into an unused
        # local here; that dead code was removed.)
        success, message = await OCRClient.save_ocr_results(
            product["folder_path"],
            {image_name: text_content}
        )
        if not success:
            raise Exception(message)

        return {
            "status": "success", 
            "image": image_name,
            "text": text_content
        }
    except Exception as e:
        # Bug fix: use the logger instead of print so failures reach the logs.
        logging.error(f"OCR处理失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"提取图片内容失败: {str(e)}")


@router.delete("/products/{product_id}", response_model=Dict)
async def delete_product(product_id: str, background_tasks: BackgroundTasks):
    """Delete a product folder and schedule removal of its vector data.

    Raises:
        HTTPException: 404 when the product does not exist; 500 when the
            deletion fails.
    """
    try:
        # Ensure the product exists before touching the filesystem.
        product = await FileManager.get_product_detail(product_id)
        if "error" in product:
            raise HTTPException(status_code=404, detail=product["error"])

        # Remove the product folder. (Bug fix: dropped the redundant local
        # `import shutil` — it is already imported at module level.)
        shutil.rmtree(product["folder_path"])

        # Remove the product's vectors in the background.
        from app.utils.document_processor import ProductDocumentProcessor
        background_tasks.add_task(ProductDocumentProcessor.delete_product_vectors, product_id)

        return {"status": "success", "message": f"产品 {product_id} 已删除"}
    except HTTPException:
        # Bug fix: the broad handler below used to rewrap the 404 as a 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


async def update_product_vectors(product_id: str):
    """Background task: rebuild the vector-store entries for one product."""
    try:
        # Imported lazily to avoid a module-level import cycle.
        from app.utils.document_processor import ProductDocumentProcessor

        await ProductDocumentProcessor.update_product_vectors(product_id)
    except Exception as e:
        # Background tasks must never raise; just record the failure.
        logging.error(f"更新产品向量失败: {str(e)}")


@router.get("/compare-products", response_model=dict)
async def compare_products(products: str = Query(..., description="逗号分隔的产品ID列表")):
    """
    Build comparison data for 2–4 products.

    Args:
        products: Comma-separated list of product IDs.

    Returns:
        Dict with per-product data, the merged feature table, and a
        hypothetical "best of all" product.

    Raises:
        HTTPException: 400 for a bad product count or an unanalyzed
            product; 404 when a product does not exist.
    """
    product_ids = products.split(',')
    
    if len(product_ids) < 2:
        raise HTTPException(status_code=400, detail="至少需要两个产品进行对比")
    
    if len(product_ids) > 4:
        raise HTTPException(status_code=400, detail="最多可以对比四个产品")
    
    # Fetch each product and require a finished analysis.
    product_details = []
    for product_id in product_ids:
        product = await FileManager.get_product_detail(product_id)
        if "error" in product:
            raise HTTPException(status_code=404, detail=f"产品 {product_id} 不存在")
        
        # Comparison is only meaningful for analyzed products.
        if not product.get("analysis_result"):
            raise HTTPException(status_code=400, detail=f"产品 {product['name']} 尚未完成分析")
        
        product_details.append(product)
    
    # Extract structured data from each product's analysis file.
    products_data = []
    for product in product_details:
        # Feature data lives in the per-product YAML analysis result.
        result_file = os.path.join(product["folder_path"], "analysis_result.yaml")
        # NOTE(review): a product whose result file is missing is silently
        # omitted from products_data — confirm this is intended.
        if os.path.exists(result_file):
            with open(result_file, 'r', encoding='utf-8') as f:
                result_data = yaml.safe_load(f) or {}
            
            # The raw Markdown analysis text.
            analysis_result = result_data.get("analysis_result", "")
            
            # Try to pull structured features/scores/advantages out of it.
            try:
                # Assumes the analysis text follows the expected Markdown
                # section layout; adjust the extractors if that changes.
                features = extract_features_from_analysis(analysis_result)
                scores = extract_scores_from_analysis(analysis_result)
                advantages = extract_advantages_from_analysis(analysis_result)
                
                # Collect this product's comparison entry.
                products_data.append({
                    "id": product["id"],
                    "name": product["name"],
                    "features": features,
                    "scores": scores,
                    "advantages": advantages,
                    "image": product["images"][0] if product["images"] else None,
                    "analysis": {
                        "analysis_result": analysis_result  # raw analysis text
                    }
                })
            except Exception as e:
                print(f"提取产品 {product['name']} 的特征数据失败: {str(e)}")
                # Extraction failed: keep the product with empty details.
                products_data.append({
                    "id": product["id"],
                    "name": product["name"],
                    "features": {},
                    "scores": {},
                    "advantages": [],
                    "image": product["images"][0] if product["images"] else None,
                    "analysis": {
                        "analysis_result": analysis_result  # raw analysis text
                    }
                })
    
    # Decide which product wins each feature.
    features_data = identify_feature_categories(products_data)
    
    # Build the hypothetical optimal product.
    hypothetical_product = generate_hypothetical_product(products_data, features_data)
    
    # Assemble the comparison payload.
    return {
        "products": products_data,
        "features": features_data,
        "hypothetical_product": hypothetical_product
    }


def extract_features_from_analysis(analysis_text):
    """Pull structured product features out of a Markdown analysis report.

    Scans the "技术参数" (technical parameters), "主要功能特点" (main
    features) and "价格定位" (price positioning) parts of the text and
    returns a dict mapping feature name -> feature value.
    """
    extracted = {}

    # Technical-parameters section: material / certification / safety.
    tech_section = re.search(r'### 技术参数(.*?)(?=###|$)', analysis_text, re.DOTALL)
    if tech_section:
        section_body = tech_section.group(1).strip()
        for label in ("材质", "认证", "安全性"):
            hit = re.search(r'\*\*' + label + r'\*\*：\s*(.*?)(?=\n|$)', section_body)
            if hit:
                extracted[label] = hit.group(1).strip()

    # Main-features section: every "**name**：value" bullet.
    feature_section = re.search(r'### 主要功能特点(.*?)(?=###|$)', analysis_text, re.DOTALL)
    if feature_section:
        body = feature_section.group(1).strip()
        for raw_name, raw_value in re.findall(r'\*\*([^*]+)\*\*：\s*(.*?)(?=\n-|\n\n|$)', body, re.DOTALL):
            key = raw_name.strip()
            # Skip implausibly long feature names.
            if len(key) <= 20:
                extracted[key] = raw_value.strip()

    # Price positioning, anywhere in the text.
    price_hit = re.search(r'\*\*价格定位\*\*：\s*(.*?)(?=\n|$)', analysis_text)
    if price_hit:
        extracted["价格"] = price_hit.group(1).strip()

    return extracted


def extract_scores_from_analysis(analysis_text):
    """Derive heuristic product scores (out of 10) from the analysis text."""
    # Baseline ratings before any text-based adjustments.
    ratings = {
        "设计": 8,
        "功能": 8,
        "性价比": 7,
        "质量": 8
    }

    bullet_pattern = r'- \*\*'

    # More than five feature bullets bumps the function score.
    if len(re.findall(bullet_pattern, analysis_text)) > 5:
        ratings["功能"] = min(10, ratings["功能"] + 1)

    # More than three market-advantage bullets raise value and quality.
    section = re.search(r'### 市场优势(.*?)(?=###|$)', analysis_text, re.DOTALL)
    if section and len(re.findall(bullet_pattern, section.group(1).strip())) > 3:
        ratings["性价比"] = min(10, ratings["性价比"] + 1)
        ratings["质量"] = min(10, ratings["质量"] + 1)

    # More than three core-highlight bullets raise the design score.
    section = re.search(r'### 核心亮点总结(.*?)(?=###|$)', analysis_text, re.DOTALL)
    if section and len(re.findall(bullet_pattern, section.group(1).strip())) > 3:
        ratings["设计"] = min(10, ratings["设计"] + 1)

    # Strong marketing keywords nudge every score up half a point.
    if re.search(r'创新|领先|突破|优秀|卓越|高端', analysis_text, re.IGNORECASE):
        for name in ratings:
            ratings[name] = min(10, ratings[name] + 0.5)

    return ratings


def extract_advantages_from_analysis(analysis_text):
    """Collect up to five product advantages from the analysis Markdown.

    Sources, in priority order: the "市场优势" (market advantages) section,
    the "核心亮点总结" (core highlights) section, and — only when fewer
    than three items were found — the "主要功能特点" (main features)
    section. Duplicates are skipped and the result is capped at five.
    """
    bullet_re = r'- \*\*([^*]+)\*\*：\s*(.*?)(?=\n-|\n\n|$)'
    collected = []

    def _section(title):
        # Return the stripped body of a "### <title>" section, or None.
        hit = re.search(r'### ' + title + r'(.*?)(?=###|$)', analysis_text, re.DOTALL)
        return hit.group(1).strip() if hit else None

    # 1) Market advantages.
    body = _section('市场优势')
    if body is not None:
        for name, desc in re.findall(bullet_re, body, re.DOTALL):
            collected.append(f"{name.strip()}：{desc.strip()}")

    # 2) Core highlights, deduplicated against what we already have.
    body = _section('核心亮点总结')
    if body is not None:
        for name, desc in re.findall(bullet_re, body, re.DOTALL):
            entry = f"{name.strip()}：{desc.strip()}"
            if entry not in collected:
                collected.append(entry)

    # 3) Fall back to main features when fewer than three items were found.
    if len(collected) < 3:
        body = _section('主要功能特点')
        if body is not None:
            for name, desc in re.findall(bullet_re, body, re.DOTALL):
                entry = f"{name.strip()}：{desc.strip()}"
                if entry not in collected:
                    collected.append(entry)
                    if len(collected) >= 5:  # stop at five
                        break

    # Cap the list at five advantages.
    return collected[:5]


def identify_feature_categories(products_data):
    """Group all product features, assign each a category, and pick a
    representative "best" product and hypothetical value per feature."""
    # Gather each feature name with every (product, value) pair that has it.
    merged = {}
    for item in products_data:
        for name, value in item.get("features", {}).items():
            merged.setdefault(name, []).append({
                "product_id": item["id"],
                "value": value
            })

    # Keyword buckets used to classify feature names.
    category_map = {
        "基本信息": ["价格", "尺寸", "重量", "材质"],
        "设计特性": ["设计", "外观", "颜色", "配色", "结构"],
        "功能特性": ["功能", "性能", "特色", "特点"]
    }

    result = []
    for index, (name, entries) in enumerate(merged.items(), start=1):
        # Default bucket when no keyword matches.
        assigned = "其他特性"
        for cat, keywords in category_map.items():
            if any(kw in name for kw in keywords):
                assigned = cat
                break

        # NOTE: picking the first product/value is a placeholder heuristic;
        # a real "best" selection would need richer comparison logic.
        leader = entries[0]["product_id"] if entries else None
        suggested = entries[0]["value"] if entries else ""

        result.append({
            "id": str(index),
            "name": name,
            "category": assigned,
            "best_product": leader,
            "hypothetical_value": suggested
        })

    # Stable sort groups features by category, unknown categories last.
    order = list(category_map.keys())
    result.sort(key=lambda row: order.index(row["category"]) if row["category"] in order else 999)

    return result


def generate_hypothetical_product(products_data, features_data):
    """Assemble a hypothetical "best of all" product from the per-feature
    winners produced by identify_feature_categories.

    ``products_data`` is accepted for interface symmetry but is not
    consulted directly; the description and strategy are fixed copy.
    """
    combined = [
        {"name": entry["name"], "value": entry["hypothetical_value"]}
        for entry in features_data
    ]

    return {
        "description": "基于对比分析，我们建议开发一款综合了现有产品优势的新产品，融合各产品的最佳特性，同时避免其缺点。",
        "features": combined,
        "strategy": "建议在保持核心竞争力的同时，通过独特的设计和功能组合实现差异化定位，针对目标用户群体的特定需求提供更有针对性的解决方案。"
    }


@router.delete("/products/{product_id}/images/{image_name}", response_model=dict)
async def delete_product_image(product_id: str, image_name: str, background_tasks: BackgroundTasks):
    """Delete one product image and purge its OCR text from the analysis file."""
    # The product must exist.
    product = await FileManager.get_product_detail(product_id)
    if "error" in product:
        raise HTTPException(status_code=404, detail=product["error"])

    # The image must exist on disk.
    image_path = os.path.join(product["folder_path"], image_name)
    if not os.path.exists(image_path):
        raise HTTPException(status_code=404, detail=f"图片 {image_name} 不存在")

    try:
        # Remove the image file itself.
        os.remove(image_path)

        # Drop the image's OCR text from the cached analysis result, if any.
        result_file = os.path.join(product["folder_path"], "analysis_result.yaml")
        if os.path.exists(result_file):
            with open(result_file, 'r', encoding='utf-8') as f:
                result_data = yaml.safe_load(f) or {}

            if "image_texts" in result_data and image_name in result_data["image_texts"]:
                del result_data["image_texts"][image_name]

                # Write the trimmed analysis data back out.
                with open(result_file, 'w', encoding='utf-8') as f:
                    yaml.dump(result_data, f, allow_unicode=True, Dumper=yaml.SafeDumper)

        # Rebuild the product's vectors in the background.
        background_tasks.add_task(update_product_vectors, product_id)

        return {"message": f"图片 {image_name} 已删除"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"删除图片失败: {str(e)}")


@router.get("/product/static-products", response_model=List[ProductResponse])
async def get_static_products():
    """List products found in the static uploads directory.

    Returns the same shape as get_products, newest first. Returns an empty
    list (rather than an error) when the directory is missing or listing
    fails.
    """
    try:
        products = []
        static_products_dir = os.path.join(settings.STATIC_DIR, "uploads", "products")
        logging.info(f"Static products directory: {static_products_dir}")

        # Nothing to list if the directory does not exist.
        if not os.path.exists(static_products_dir):
            logging.warning(f"静态产品目录不存在: {static_products_dir}")
            return []

        for folder_name in os.listdir(static_products_dir):
            folder_path = os.path.join(static_products_dir, folder_name)
            if not os.path.isdir(folder_path):
                continue

            # Folder names look like "<name>_<uuid>"; strip the UUID suffix.
            product_name = folder_name.split("_")[0] if "_" in folder_name else folder_name

            # Analysis counts as done when the YAML result file exists.
            result_file = os.path.join(folder_path, "analysis_result.yaml")
            has_analysis = os.path.exists(result_file)

            # Bug fix: this previously called glob.glob without importing
            # glob, so the handler always raised NameError and returned [].
            # Count image files via os.listdir instead.
            image_count = sum(
                1 for entry in os.listdir(folder_path)
                if entry.lower().endswith(('.jpg', '.jpeg', '.png'))
            )

            products.append({
                "id": folder_name,
                "name": product_name,
                "has_analysis": has_analysis,
                "image_count": image_count,
                "created_time": os.path.getctime(folder_path)
            })

        # Newest products first.
        products.sort(key=lambda x: x["created_time"], reverse=True)
        logging.info(f"Found {len(products)} products in static directory")

        return products
    except Exception as e:
        logging.error(f"Failed to get static products: {str(e)}")
        return []


@router.get("/static-product/{product_id}")
async def get_static_product_detail(product_id: str):
    """Return detail for a product stored under the static uploads directory.

    Errors are reported as {"error": ...} dicts instead of HTTP errors,
    preserving this endpoint's original contract.
    """
    try:
        folder_path = os.path.join(settings.STATIC_DIR, "uploads", "products", product_id)
        if not os.path.exists(folder_path):
            return {"error": "产品不存在"}

        # Folder names look like "<name>_<uuid>"; strip the UUID suffix.
        product_name = product_id.split("_")[0] if "_" in product_id else product_id

        # Bug fix: glob was used here without being imported, which made
        # this handler always fall into the except branch. List the image
        # files via os.listdir instead.
        images = [
            entry for entry in os.listdir(folder_path)
            if entry.lower().endswith(('.jpg', '.jpeg', '.png'))
        ]

        # Load the cached analysis result, if present.
        result_file = os.path.join(folder_path, "analysis_result.yaml")
        analysis_result = None
        if os.path.exists(result_file):
            with open(result_file, "r", encoding="utf-8") as f:
                analysis_data = yaml.safe_load(f)
                analysis_result = analysis_data.get("analysis_result") if analysis_data else None

        return {
            "id": product_id,
            "name": product_name,
            "folder_path": folder_path.replace("\\", "/"),
            "images": images,
            "analysis_result": analysis_result,
            "created_time": os.path.getctime(folder_path)
        }
    except Exception as e:
        return {"error": str(e), "message": "获取静态产品详情失败"}


@router.get("/static")
async def get_static_demo_products():
    """Return a hard-coded demo product list for the /static endpoint.

    Bug fix: renamed from get_static_products — the original name collided
    with the handler for /product/static-products defined earlier in this
    module, shadowing it at module level. The route path is unchanged.
    """
    try:
        # Canned demo data; timestamps are generated at request time.
        products = [
            {
                "id": "纸尿裤_a2a26a5b",  
                "name": "婴儿纸尿裤",
                "image_count": 3,
                "has_analysis": True,
                "created_time": datetime.now().timestamp()
            },
            {
                "id": "奶粉_b5c37f9a",
                "name": "婴儿奶粉",  
                "image_count": 5,
                "has_analysis": True,
                "created_time": datetime.now().timestamp()
            },
            {
                "id": "玩具_d7e48b1c",
                "name": "儿童积木玩具",
                "image_count": 4,
                "has_analysis": False,
                "created_time": datetime.now().timestamp()
            }
        ]
        return products
    except Exception as e:
        logging.error(f"获取静态示例产品失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/{product_id}/files/{file_name}")
async def get_product_file(product_id: str, file_name: str):
    """
    Serve a file from a product's directory.

    Args:
        product_id: Product ID.
        file_name: File name within the product folder.

    Returns:
        FileResponse with a best-guess media type for common extensions.

    Raises:
        HTTPException: 400 for an illegal file name, 404 when the product
            or file is missing, 500 on unexpected errors.
    """
    try:
        # Security fix: file_name is user-supplied; reject anything that is
        # not a plain file name so it cannot escape the product folder.
        if os.path.basename(file_name) != file_name or file_name in ('.', '..'):
            raise HTTPException(status_code=400, detail=f"非法文件名: {file_name}")

        # Ensure the product folder exists. (Bug fix: dropped the redundant
        # local `import os` — it is already imported at module level.)
        product_folder = os.path.join(settings.PRODUCTS_DIR, product_id)
        if not os.path.exists(product_folder):
            raise HTTPException(status_code=404, detail="产品不存在")

        # Ensure the requested file exists.
        file_path = os.path.join(product_folder, file_name)
        if not os.path.exists(file_path):
            raise HTTPException(status_code=404, detail=f"文件 {file_name} 不存在")

        # Map the extension to a media type; None lets FileResponse guess.
        media_types = {
            '.jpg': "image/jpeg",
            '.jpeg': "image/jpeg",
            '.png': "image/png",
            '.gif': "image/gif",
            '.pdf': "application/pdf",
            '.txt': "text/plain",
            '.yaml': "application/x-yaml",
            '.yml': "application/x-yaml",
        }
        file_ext = os.path.splitext(file_name)[1].lower()
        media_type = media_types.get(file_ext)

        return FileResponse(file_path, media_type=media_type, filename=file_name)

    except HTTPException:
        # Bug fix: the broad handler below used to rewrap the deliberate
        # 400/404 responses above as 500s.
        raise
    except Exception as e:
        logging.error(f"获取产品文件失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/{product_id}/wordcloud")
async def generate_wordcloud(
    product_id: str,
    max_words: int = Query(100, ge=20, le=200),
    color_scheme: str = Query("default", regex="^(default|colorful|warm|cool)$"),
    shape: str = Query("rectangle", regex="^(circle|rectangle)$"),
    force_regenerate: bool = Query(False, description="是否强制重新生成词云图")
):
    """
    Generate a word-cloud image from the product's analysis text.

    Params:
    - product_id: product ID
    - max_words: maximum number of words shown (default 100)
    - color_scheme: default (blues) / colorful / warm / cool palette
    - shape: circle or rectangle cloud shape
    - force_regenerate: if False and a cached image exists, return it directly

    Returns:
    - JSON dict with the generated image URL and cache status

    Raises:
    - HTTPException 404 if the product or its analysis result is missing
    - HTTPException 400 if the analysis text is empty
    - HTTPException 500 on unexpected errors
    """
    try:
        # Heavy, optional deps loaded lazily; os/yaml/PIL already imported
        # at module level.
        import jieba
        import numpy as np
        import matplotlib
        matplotlib.use('Agg')  # headless backend — no display server needed
        import matplotlib.pyplot as plt
        from wordcloud import WordCloud, STOPWORDS

        # Check the product exists
        product_folder = os.path.join(settings.PRODUCTS_DIR, product_id)
        if not os.path.exists(product_folder):
            raise HTTPException(status_code=404, detail="产品不存在")

        # One cached file per parameter combination
        filename = f"wordcloud_{color_scheme}_{shape}_{max_words}.png"
        wordcloud_path = os.path.join(product_folder, filename)

        # Cache hit: return the existing image
        if os.path.exists(wordcloud_path) and not force_regenerate:
            logging.info(f"词云图已存在，直接返回: {wordcloud_path}")
            return {
                "status": "success",
                # FIX: the URL must reference the actual cached file name.
                "image_url": f"/api/products/{product_id}/files/{filename}",
                "product_id": product_id,
                "is_cached": True
            }

        # Load the analysis result
        analysis_file = os.path.join(product_folder, "analysis_result.yaml")
        if not os.path.exists(analysis_file):
            raise HTTPException(status_code=404, detail="产品分析结果不存在")

        with open(analysis_file, 'r', encoding='utf-8') as f:
            analysis_data = yaml.safe_load(f) or {}

        analysis_text = analysis_data.get('analysis_result', '')
        if not analysis_text:
            raise HTTPException(status_code=400, detail="产品分析文本为空")

        # Stopwords: wordcloud built-ins plus common Chinese function words
        stopwords = set(STOPWORDS)
        chinese_stopwords = {'的', '了', '是', '在', '和', '与', '以', '及', '或', '等', '对', '能', '都', '为', '有', '这', '那', '这些', '那些', '中', '上', '下', '前', '后', '内', '外', '再', '又', '也', '但', '不', '很', '非常', '更', '最', '因为', '所以', '可以', '可能', '应该', '会', '通过', '使用', '如果', '因此', '由于', '同时', '并且', '以及', '此外', '而且', '但是', '然而'}
        stopwords.update(chinese_stopwords)

        # Tokenize with jieba, drop stopwords and single characters
        words = jieba.cut(analysis_text, cut_all=False)
        words_filtered = ' '.join(
            word for word in words if len(word) > 1 and word not in stopwords
        )

        # Colour palette per scheme; "default" falls through to PuBu
        colormap = {
            "colorful": 'viridis',
            "warm": 'Oranges',
            "cool": 'Blues',
        }.get(color_scheme, 'PuBu')

        if shape == "circle":
            # FIX: wordcloud draws words where the mask is NOT white (255),
            # so the area OUTSIDE the circle must be white and the inside
            # dark. The previous code had this inverted, which rendered the
            # words in the corners instead of inside the circle.
            yy, xx = np.ogrid[:500, :500]
            outside = (xx - 250) ** 2 + (yy - 250) ** 2 > 240 ** 2
            mask = np.zeros((500, 500), dtype=np.uint8)
            mask[outside] = 255
        else:  # rectangle: no mask needed
            mask = None

        wc = WordCloud(
            font_path=os.path.join(settings.STATIC_DIR, 'fonts', 'simhei.ttf'),  # Chinese-capable font
            max_words=max_words,
            width=800,
            height=400,
            background_color='white',
            stopwords=stopwords,
            mask=mask,
            colormap=colormap,
            collocations=False  # avoid duplicated bigrams
        )
        wc.generate(words_filtered)

        # Render once into the product folder...
        plt.figure(figsize=(10, 5))
        plt.imshow(wc, interpolation='bilinear')
        plt.axis('off')
        plt.tight_layout(pad=0)
        plt.savefig(wordcloud_path, dpi=300, bbox_inches='tight')
        plt.close()

        # ...and copy into the static cache instead of re-rendering the
        # identical figure a second time.
        static_wordclouds_dir = os.path.join(settings.STATIC_DIR, 'wordclouds')
        os.makedirs(static_wordclouds_dir, exist_ok=True)
        static_filepath = os.path.join(static_wordclouds_dir, f"{product_id}_{filename}")
        shutil.copyfile(wordcloud_path, static_filepath)

        return {
            "status": "success",
            # FIX: reference the generated file instead of a placeholder.
            "image_url": f"/api/products/{product_id}/files/{filename}",
            "product_id": product_id,
            "is_cached": False
        }

    except HTTPException:
        # Preserve intended 4xx status codes instead of collapsing to 500.
        raise
    except Exception as e:
        logging.error(f"生成词云失败: {str(e)}")
        import traceback
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/{product_id}/wordcloud/analyze", response_model=dict)
async def analyze_wordcloud(product_id: str, force_regenerate: bool = Query(False, description="是否强制重新分析")):
    """
    Analyze the product's word cloud with AI assistance and return keyword
    statistics, keyword groups, a summary and marketing suggestions.

    Params:
    - product_id: product ID
    - force_regenerate: re-run the analysis even when a cached result exists

    Returns:
    - dict with status / analysis / is_cached

    Raises:
    - HTTPException 404 if the product or its analysis result is missing
    - HTTPException 400 if the analysis text is empty or too short
    - HTTPException 500 on unexpected errors
    """
    try:
        import jieba  # os / yaml / re / collections already imported at module level

        # Check the product exists
        product_folder = os.path.join(settings.PRODUCTS_DIR, product_id)
        if not os.path.exists(product_folder):
            raise HTTPException(status_code=404, detail=f"未找到ID为 {product_id} 的产品")

        analysis_file = os.path.join(product_folder, "analysis_result.yaml")
        if not os.path.exists(analysis_file):
            raise HTTPException(status_code=404, detail="产品分析结果不存在")

        # FullLoader is kept for legacy result files that contain Python
        # tuple tags from earlier dumps. NOTE(review): this is acceptable
        # only because the file is locally generated, never user-uploaded.
        with open(analysis_file, 'r', encoding='utf-8') as f:
            analysis_data = yaml.load(f, Loader=FullLoader) or {}

        # Cache hit: return the stored analysis
        if "wordcloud_analysis" in analysis_data and not force_regenerate:
            logging.info(f"从缓存读取产品 {product_id} 的词云分析结果")
            return {
                "status": "success",
                "analysis": analysis_data["wordcloud_analysis"],
                "is_cached": True
            }

        analysis_text = analysis_data.get('analysis_result', '')
        if not analysis_text:
            raise HTTPException(status_code=400, detail="产品分析文本为空")

        # Common Chinese function words to drop before counting
        stopwords = {'的', '了', '是', '在', '和', '与', '以', '及', '或', '等', '对', '能', '都', '为', '有', '这', '那', '这些', '那些', '中', '上', '下', '前', '后', '内', '外', '再', '又', '也', '但', '不', '很', '非常', '更', '最', '因为', '所以', '可以', '可能', '应该', '会', '通过', '使用', '如果', '因此', '由于', '同时', '并且', '以及', '此外', '而且', '但是', '然而'}

        # Tokenize and count frequencies
        words = jieba.cut(analysis_text, cut_all=False)
        words_filtered = [word for word in words if len(word) > 1 and word not in stopwords]
        word_freq = collections.Counter(words_filtered)

        # Top 20 most frequent words as (word, count) pairs
        top_words = word_freq.most_common(20)

        # Product folder names look like "<name>_<hash>"
        product_name = product_id.split('_')[0] if '_' in product_id else product_id

        def _section(header: str) -> str:
            """Extract the text of a markdown section following `header`."""
            m = re.search(rf'{header}(.*?)(?=###|$)', analysis_text, re.DOTALL)
            return m.group(1) if m else ""

        features_text = _section('主要功能特点')
        advantages_text = _section('市场优势')
        highlights_text = _section('核心亮点总结')

        # FIX: initialize the keyword lists up front so the prompt below
        # never has to probe locals() for possibly-undefined names.
        feature_keywords = []
        advantage_keywords = []
        highlight_keywords = []
        keyword_groups = []

        # Feature words
        if features_text:
            feature_keywords = [w for w, _ in top_words if w in features_text][:5]
            keyword_groups.append({
                "group_name": "产品特性词",
                "keywords": feature_keywords,
                "description": "这些关键词反映了产品的主要特性和功能"
            })

        # Market advantage words
        if advantages_text:
            advantage_keywords = [w for w, _ in top_words if w in advantages_text][:5]
            keyword_groups.append({
                "group_name": "市场优势词",
                "keywords": advantage_keywords,
                "description": "这些关键词体现了产品在市场中的竞争优势"
            })

        # Core highlight words
        if highlights_text:
            highlight_keywords = [w for w, _ in top_words if w in highlights_text][:5]
            keyword_groups.append({
                "group_name": "核心亮点词",
                "keywords": highlight_keywords,
                "description": "这些关键词展示了产品最显著的特点和卖点"
            })

        main_keywords = [word for word, _ in top_words[:10]]

        # FIX: summary and fallback suggestions index main_keywords[0..2];
        # guard instead of crashing with IndexError on very short texts.
        if len(main_keywords) < 3:
            raise HTTPException(status_code=400, detail="产品分析文本过短，无法生成词云分析")

        prompt = f"""
基于以下产品关键词和信息，请提供简短的营销建议和市场分析:

产品名称: {product_name}
高频关键词: {', '.join(main_keywords)}
产品特性: {', '.join(feature_keywords) if feature_keywords else '无数据'}
市场优势: {', '.join(advantage_keywords) if advantage_keywords else '无数据'}
核心亮点: {', '.join(highlight_keywords) if highlight_keywords else '无数据'}

请简要分析这些关键词的重要性，并提供4-5条具体的营销建议。
        """

        def _fallback_suggestions():
            """Default suggestions used when the AI call fails or yields nothing."""
            return [
                f'在产品描述中突出"{main_keywords[0]}"和"{main_keywords[1]}"的相关特性',
                f'使用"{main_keywords[2]}"作为产品的主要卖点之一',
                f'针对搜索引擎优化，将"{", ".join(main_keywords[:5])}"等关键词作为SEO关键词',
                "根据词频分析调整产品描述，使其更符合市场需求"
            ]

        try:
            ai_suggestions = await LLMProcessor.analyze_with_prompt(prompt)
            # Split a numbered AI reply into individual suggestions
            suggestions = re.findall(r'\d+\.\s+(.*?)(?=\n\d+\.|\n\n|$)', ai_suggestions, re.DOTALL)
            marketing_suggestions = [s.strip() for s in suggestions if s.strip()]
            if not marketing_suggestions:
                # AI output could not be parsed into structured suggestions
                marketing_suggestions = _fallback_suggestions()
        except Exception as e:
            logging.error(f"AI分析失败: {str(e)}")
            marketing_suggestions = _fallback_suggestions()

        # Human-readable summary (slices never raise, indexing is guarded above)
        summary = f'通过词云分析，我们可以看出该产品以"{main_keywords[0]}"、"{main_keywords[1]}"和"{main_keywords[2]}"为核心卖点，主要强调了产品的{", ".join(main_keywords[3:6])}等特性。建议在营销推广中重点突出这些关键词以吸引目标客户。'

        analysis_result = {
            "product_name": product_name,
            "top_keywords": top_words,
            "keyword_groups": keyword_groups,
            "summary": summary,
            "marketing_suggestions": marketing_suggestions,
            "generated_at": datetime.now().isoformat()
        }

        # Persist the analysis next to the product's other results
        analysis_data["wordcloud_analysis"] = analysis_result
        with open(analysis_file, 'w', encoding='utf-8') as f:
            yaml.dump(analysis_data, f, allow_unicode=True, Dumper=yaml.SafeDumper)

        return {
            "status": "success",
            "analysis": analysis_result,
            "is_cached": False
        }

    except HTTPException:
        # Preserve intended 4xx status codes instead of collapsing to 500.
        raise
    except Exception as e:
        logging.error(f"分析词云失败: {str(e)}")
        import traceback
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))

def extract_keywords_from_text(text, count=5):
    """Extract the most frequent keywords from Chinese text.

    Tokenizes with jieba, drops stopwords and single-character tokens,
    and returns up to `count` words ordered by descending frequency.
    """
    if not text.strip():
        return []

    # Common function words and single characters carry no signal
    stop_words = {'的', '了', '和', '是', '在', '有', '与', '这', '我们', '您', '也', '都', '等', '使', '被', '将', '更'}

    tokens = (token.strip() for token in jieba.cut(text))
    frequencies = collections.Counter(
        token for token in tokens
        if len(token) > 1 and token not in stop_words
    )

    # Highest-frequency words first
    return [token for token, _ in frequencies.most_common(count)]

def get_product_info(product_id):
    """Return a product's basic info dict from its info.yaml.

    Falls back to a minimal {"name": product_id} dict when the file is
    missing or empty, so callers always receive a dict.
    """
    # FIX: use the configured products directory for consistency with the
    # route handlers above; the previous hard-coded "data/products" breaks
    # when PRODUCTS_DIR is configured elsewhere.
    product_file = os.path.join(settings.PRODUCTS_DIR, product_id, "info.yaml")
    if os.path.exists(product_file):
        with open(product_file, 'r', encoding='utf-8') as f:
            # safe_load returns None for an empty file — guard so callers
            # never receive a non-dict.
            data = yaml.safe_load(f)
        if data:
            return data
    return {"name": product_id}