"""
Vehicle 模块 - PicknBuy24车辆业务服务 (简化版)
"""
from typing import List, Dict, Any, Set
from axiom_boot import service
from axiom_boot.database.transaction import transactional
from axiom_boot.database.base_service import BaseService
from axiom_boot.di import autowired
from axiom_boot.logging.setup import get_logger
from axiom_boot.core.id_generator import IdGenerator
from axiom_boot.cache.service import CacheService

from ..mapper.vehicle_picknbuy24_mapper import VehiclePicknbuy24Mapper
from ..models.vehicle_picknbuy24 import VehiclePicknbuy24
from ...storage.service.storage_service import StorageService
from ...storage.service.file_relation_service import FileRelationService
from ...storage.service.storage_bucket_service import StorageBucketService
from ...scraper.services.scraper_failed_record_service import ScraperFailedRecordService

logger = get_logger(__name__)


@service()
class VehicleService(BaseService[VehiclePicknbuy24, VehiclePicknbuy24Mapper]):
    """PicknBuy24 vehicle business service (simplified version).

    Persists scraped vehicle data in batches, tracks image-download state,
    detects sold (delisted) vehicles, and performs price-only refreshes.
    """
    
    # Collaborators injected by the DI container via autowired().
    storage_service: StorageService = autowired()  # file/folder storage operations
    file_relation_service: FileRelationService = autowired()  # links files to business entities
    bucket_service: StorageBucketService = autowired()  # storage bucket management
    id_generator: IdGenerator = autowired()  # ID generation (unused in the code shown here)
    cache_service: CacheService = autowired()  # caches bucket/folder lookups
    failed_record_service: ScraperFailedRecordService = autowired()  # records failures for retry tasks
    
    def __init__(self, picknbuy24_mapper: VehiclePicknbuy24Mapper):
        # BaseService supplies the generic find/save/update helpers used below.
        super().__init__(picknbuy24_mapper)
    
    @transactional
    async def batch_save_vehicles_only(self, vehicles_data: List[Dict[str, Any]], 
                                     source: str = "picknbuy24", 
                                     update_existing: bool = True) -> int:
        """Batch-save basic vehicle info (no image download), with dedup checks.

        Args:
            vehicles_data: raw vehicle dicts from the scraper; entries without
                a ``ref_no`` are ignored.
            source: data-source tag stored on each new vehicle row.
            update_existing: when True, vehicles already in the DB get their
                price fields refreshed and missing fields back-filled.

        Returns:
            Number of vehicles inserted plus number updated.
        """
        logger.info(f"开始批量保存 {len(vehicles_data)} 条车辆数据（仅基本信息），包含去重校验")
        
        # Fix: the old code also built a `current_batch_ref_nos` set here that
        # was never used (dead code) — removed.
        
        # 1. Collect all ref_nos for one batched existence check.
        ref_nos = [vehicle_data.get('ref_no', '') for vehicle_data in vehicles_data if vehicle_data.get('ref_no')]
        if not ref_nos:
            logger.warning("没有有效的ref_no，跳过保存")
            return 0
        
        # 2. Existence check (direct DB query; see helper for why cache is bypassed).
        existing_ref_nos = await self._get_existing_ref_nos_with_cache(ref_nos)
        
        logger.info(f"发现已存在的车辆: {len(existing_ref_nos)}/{len(ref_nos)}")
        
        # 3. Split the batch into brand-new vs. already-known vehicles.
        new_vehicles_data = []
        existing_vehicles_data = []
        
        for vehicle_data in vehicles_data:
            ref_no = vehicle_data.get('ref_no')
            if ref_no:
                if ref_no in existing_ref_nos:
                    existing_vehicles_data.append(vehicle_data)
                else:
                    new_vehicles_data.append(vehicle_data)
        
        logger.info(f"新车辆: {len(new_vehicles_data)} 条，已存在车辆: {len(existing_vehicles_data)} 条")
        
        total_saved = 0
        
        # 4. Insert new vehicles.
        if new_vehicles_data:
            logger.info(f"准备保存 {len(new_vehicles_data)} 条新车辆数据")
            saved_new = await self._save_new_vehicles(new_vehicles_data, source)
            total_saved += saved_new
        
        # 5. Update already-known vehicles (refresh prices, fill missing fields).
        if existing_vehicles_data and update_existing:
            logger.info(f"准备更新 {len(existing_vehicles_data)} 条已存在车辆的缺失字段")
            updated_count = await self._update_existing_vehicles(existing_vehicles_data, source)
            total_saved += updated_count
        elif existing_vehicles_data:
            logger.info(f"跳过更新 {len(existing_vehicles_data)} 条已存在车辆")
        
        return total_saved
    
    async def _save_new_vehicles(self, vehicles_data: List[Dict[str, Any]], source: str) -> int:
        """Insert brand-new vehicles; fall back to per-row saves on a batch conflict.

        Returns the number of rows actually persisted.
        """
        models = []
        for raw in vehicles_data:
            try:
                model = self._create_vehicle_model(raw, source)
                # Images are fetched later by a separate download task.
                model.image_status = 'pending'
                model.image_count = len(raw.get('images', []))
                models.append(model)
            except Exception as e:
                logger.error(f"创建车辆模型失败: {raw.get('ref_no', 'Unknown')} - {e}")

        if not models:
            return 0

        try:
            await self.batch_save(models)
        except Exception as e:
            # Most likely a unique-constraint conflict: retry row by row and
            # keep whatever survives.
            logger.warning(f"批量保存遇到冲突，尝试逐个保存: {e}")
            saved = 0
            for model in models:
                try:
                    await self.save(model)
                except Exception as save_error:
                    logger.warning(f"车辆 {model.ref_no} 保存失败（可能已存在）: {save_error}")
                else:
                    saved += 1
            logger.info(f"新车辆逐个保存完成，成功: {saved}/{len(models)}")
            return saved

        logger.info(f"新车辆批量保存完成: {len(models)}/{len(vehicles_data)}")
        return len(models)
    
    async def _update_existing_vehicles(self, vehicles_data: List[Dict[str, Any]], source: str) -> int:
        """Update vehicles that already exist in the DB.

        Price-related fields are force-refreshed (weekly re-scrapes must pick
        up the latest prices); other fields are only back-filled when currently
        empty. A change in image count flips the vehicle back to image_status
        'pending' so its pictures get re-downloaded.

        Args:
            vehicles_data: raw vehicle dicts keyed by ``ref_no``.
            source: data-source tag (currently unused here; kept for signature
                symmetry with _save_new_vehicles).

        Returns:
            Number of vehicles that actually received an update.
        """
        # Fix: this list was previously duplicated verbatim in the update and
        # logging sections — defined once now.
        price_fields = ['status_tag', 'price', 'currency', 'price_original',
                        'price_before_discount', 'discount_amount', 'has_discount']

        updated_count = 0

        for vehicle_data in vehicles_data:
            try:
                ref_no = vehicle_data.get('ref_no')
                if not ref_no:
                    continue

                existing_vehicle = await self.find_one_by_filters(ref_no=ref_no)
                if not existing_vehicle:
                    continue

                needs_update = False
                update_fields = {}

                # === Price fields: force-update whenever the new value differs ===
                for field in price_fields:
                    new_value = vehicle_data.get(field)
                    if new_value is not None:  # only update when the scrape produced a value
                        current_value = getattr(existing_vehicle, field, None)
                        if current_value != new_value:
                            update_fields[field] = new_value
                            needs_update = True

                # === Image count check / download-state management ===
                # Fix: fall back to len(images) so this path stays consistent
                # with _save_new_vehicles, which derives the count from the
                # 'images' list when no explicit 'image_count' is provided.
                new_image_count = vehicle_data.get('image_count', len(vehicle_data.get('images', [])))
                current_image_count = existing_vehicle.image_count or 0

                if new_image_count != current_image_count:
                    # Count changed -> pictures must be re-downloaded.
                    update_fields['image_count'] = new_image_count
                    update_fields['image_status'] = 'pending'
                    needs_update = True
                    logger.debug(f"车辆 {ref_no} 图片数量变化: {current_image_count} -> {new_image_count}，设为待下载")

                # === Other fields: fill only when currently empty ===

                # Technical-spec fields
                if not existing_vehicle.seats and vehicle_data.get('seats'):
                    update_fields['seats'] = vehicle_data.get('seats')
                    needs_update = True

                if not existing_vehicle.doors and vehicle_data.get('doors'):
                    update_fields['doors'] = vehicle_data.get('doors')
                    needs_update = True

                if not existing_vehicle.chassis and vehicle_data.get('chassis'):
                    update_fields['chassis'] = vehicle_data.get('chassis')
                    needs_update = True

                # Feature list (stored here as a '; '-joined string)
                if not existing_vehicle.convenient_features and vehicle_data.get('convenient_features'):
                    features = vehicle_data.get('convenient_features', [])
                    if isinstance(features, list) and features:
                        update_fields['convenient_features'] = '; '.join(features)
                        needs_update = True

                if needs_update:
                    updated_fields = list(update_fields.keys())

                    # Collect before/after values of changed price fields for the log.
                    price_updates = []
                    for field in price_fields:
                        if field in update_fields:
                            old_value = getattr(existing_vehicle, field, None)
                            new_value = update_fields[field]
                            if old_value != new_value:
                                price_updates.append(f"{field}: {old_value} -> {new_value}")

                    await self.update_by_pk(existing_vehicle.id, update_fields)
                    updated_count += 1

                    if price_updates:
                        logger.info(f"车辆 {ref_no} 价格信息更新: {', '.join(price_updates)}")

                    if 'image_count' in update_fields or 'image_status' in update_fields:
                        logger.info(f"车辆 {ref_no} 图片状态更新: 数量 {current_image_count} -> {new_image_count}, 状态设为 pending")

                    logger.debug(f"车辆 {ref_no} 更新完成，字段: {updated_fields}")

            except Exception as e:
                logger.error(f"更新车辆 {vehicle_data.get('ref_no', 'Unknown')} 失败: {e}")
                continue

        logger.info(f"已存在车辆更新完成: {updated_count}/{len(vehicles_data)}")
        return updated_count
    
    async def find_pending_image_vehicles(self, batch_size: int = 50, offset: int = 0) -> List[VehiclePicknbuy24]:
        """
        Fetch one page of vehicles whose images still need downloading.

        Only image_status == 'pending' rows are considered; retrying failed
        downloads is left to a dedicated retry task.

        Args:
            batch_size: page size
            offset: page offset

        Returns:
            The requested page of pending vehicles.
        """
        # Fix: the old code loaded EVERY pending vehicle and sliced the list in
        # memory. Push pagination down to the query instead (same limit/offset/
        # order_by kwargs this service already uses in
        # _build_existing_vehicles_cache); a fixed order keeps pages stable
        # across successive calls.
        vehicles = await self.find_by_filters(
            image_status='pending',
            limit=batch_size,
            offset=offset,
            order_by=["id"]
        )

        logger.info(f"查找到 {len(vehicles)} 个待下载图片的车辆（偏移: {offset}）")
        return vehicles
    
    @transactional
    async def reset_image_status_for_updated_vehicles(self) -> int:
        """
        Re-check 'pending' vehicles after a periodic scrape.

        Vehicles with a positive image_count stay pending (they genuinely need
        a download); vehicles with no images are flipped straight to
        'completed'.

        Returns:
            Number of vehicles that still need their images downloaded.
        """
        candidates = await self.find_by_filters(
            image_status='pending',
            order_by=['update_time desc']
        )

        pending_total = 0
        for candidate in candidates:
            if candidate.image_count and candidate.image_count > 0:
                # Has a recorded image count: keep it pending for re-download.
                logger.info(f"车辆 {candidate.ref_no} 需要重新下载 {candidate.image_count} 张图片")
                pending_total += 1
            else:
                # Nothing to download: close it out immediately.
                await self.update_by_pk(candidate.id, {'image_status': 'completed'})
                logger.debug(f"车辆 {candidate.ref_no} 无图片，标记为completed")

        logger.info(f"发现 {pending_total} 个车辆需要重新下载图片")
        return pending_total
    
    async def count_pending_image_vehicles(self) -> int:
        """Return how many vehicles still have image_status == 'pending'."""
        pending_total = await self.count(image_status='pending')
        logger.info(f"待下载图片的车辆总数: {pending_total}")
        return pending_total
    
    def _create_vehicle_model(self, vehicle_data: Dict[str, Any], source: str) -> VehiclePicknbuy24:
        """Map a raw scraped dict onto a fresh VehiclePicknbuy24 instance.

        Every field is copied with ``vehicle_data.get(field, default)``; the
        table below pins the per-field defaults.
        """
        # Scalar fields with their per-field defaults.
        scalar_defaults = {
            # identifiers
            'ref_no': '',
            'detail_url': '',
            # basic info
            'year': '',
            'make': '',
            'model': '',
            'vehicle_type': 'UNKNOWN',
            'vehicle_type_confidence': 'UNKNOWN',
            # pricing (price-like DECIMAL fields default to None)
            'currency': 'USD',
            'price': None,
            'price_original': '',
            'price_before_discount': None,
            'discount_amount': None,
            'has_discount': 0,
            # status tag
            'status_tag': 'normal',
            # technical specs
            'mileage_km': '',
            'mileage_miles': '',
            'engine_cc': '',
            'fuel_type': '',
            'transmission': '',
            'drivetrain': '',
            'steering': '',
            # body info
            'seats': '',
            'doors': '',
            'color': '',
            # dimensions / weight
            'length_m': '',
            'width_m': '',
            'height_m': '',
            'weight_kg': '',
            'cubic_meter': '',
            # PicknBuy24-specific fields
            'chassis': '',
            'chassis_no_full': '',
            'reg_year_month': '',
            'location': '',
            'engine_type': '',
            'seatbelts_year': '',
        }
        # Feature-list fields default to an empty list.
        list_fields = (
            'exterior_options', 'interior_options', 'safety_features',
            'convenient_features', 'multimedia_features', 'equipment_features',
            'remarkable_features',
        )

        vehicle = VehiclePicknbuy24()
        vehicle.source = source
        for field, default in scalar_defaults.items():
            setattr(vehicle, field, vehicle_data.get(field, default))
        for field in list_fields:
            # The [] literal is evaluated per iteration, so each vehicle gets
            # its own fresh list (matching the original per-call literals).
            setattr(vehicle, field, vehicle_data.get(field, []))
        return vehicle
    
    @transactional
    async def download_vehicle_images(self, vehicle_id: str, images_data: List[Dict[str, Any]]) -> bool:
        """
        Download the images for one vehicle and track the outcome.

        Drives image_status through 'downloading' -> 'completed' on success,
        or 'failed' on error; failures are additionally written to the scraper
        failure-record table so a retry task can pick them up.

        Args:
            vehicle_id: vehicle primary key
            images_data: image descriptor dicts (see _save_vehicle_images)
            
        Returns:
            True when the images were saved, False on any error.
        """
        try:
            # Mark as in-progress first.
            await self.update_by_pk(vehicle_id, {'image_status': 'downloading'})
            
            # Download + store; returns how many images were actually saved.
            image_count = await self._save_vehicle_images(vehicle_id, images_data)
            
            # Mark as done and record the final count.
            await self.update_by_pk(vehicle_id, {
                'image_status': 'completed',
                'image_count': image_count
            })
            
            logger.info(f"车辆 {vehicle_id} 图片下载成功，共 {image_count} 张")
            return True
            
        except Exception as e:
            # Mark as failed.
            # NOTE(review): this write happens inside the same @transactional
            # scope that just raised — confirm the framework still persists it
            # if the surrounding transaction rolls back.
            await self.update_by_pk(vehicle_id, {'image_status': 'failed'})
            logger.error(f"车辆 {vehicle_id} 图片下载失败: {e}")
            
            # Record the failure so the dedicated retry task can re-process it.
            try:
                from ...scraper.models.failure_record_request import FileDownloadFailureRequest
                await self.failed_record_service.record_file_download_failure(
                    FileDownloadFailureRequest(
                        site_name="picknbuy24",
                        error=e,
                        item_reference=vehicle_id,
                        target_url=f"vehicle_images_{vehicle_id}",
                        failure_data={"images_count": len(images_data) if images_data else 0}
                    )
                )
                logger.info(f"已记录车辆 {vehicle_id} 图片下载失败到失败记录表")
            except Exception as record_error:
                # Best-effort bookkeeping: never let it mask the original error.
                logger.error(f"记录失败信息失败: {record_error}")
            
            return False
    

    
    async def _save_vehicle_images(self, vehicle_id: str, images: List[Dict[str, Any]]) -> int:
        """
        Download and persist a vehicle's images, organised by model year.

        Layout: bucket "vehicles", folder "picknbuy24/<year>", filenames
        "vehicle_<id>_<NNN>.<ext>" in scrape order; the first image is primary.

        Args:
            vehicle_id: vehicle primary key
            images: image descriptors; each is expected to carry 'url' and
                optionally 'filename' (used only for the extension) —
                TODO confirm shape against the scraper output
            
        Returns:
            Number of images successfully stored (0 on any fatal error).
        """
        if not images:
            return 0
            
        try:
            # 1. Make sure the bucket exists; keep the real (possibly created) name.
            bucket_name = "vehicles"
            real_bucket_name = await self._ensure_bucket_exists(bucket_name)
            
            # 2. Load the vehicle to decide the year-based folder path.
            vehicle = await self.find_by_pk(vehicle_id)
            if not vehicle:
                logger.error(f"未找到车辆ID: {vehicle_id}")
                return 0
            
            # Group by validated model year; invalid years land in "unknown".
            vehicle_year = self._validate_vehicle_year(vehicle.year)
            folder_path = f"picknbuy24/{vehicle_year}"
            
            # 3. Make sure the folder chain exists.
            folder_id = await self._ensure_folder_exists(real_bucket_name, folder_path)
            
            # 4. Download the images concurrently and build upload payloads.
            import asyncio
            
            async def download_single_image(i: int, image_info: Dict[str, Any]):
                """Download one image; returns an upload-payload dict or None."""
                image_url = image_info.get('url', '')
                if not image_url:
                    return None
                    
                try:
                    # Unique filename: vehicle_<id>_<NNN>.<ext>
                    # (extension falls back to 'jpg' when the name has none)
                    ext = image_info.get('filename', '.jpg').split('.')[-1] or 'jpg'
                    filename = f"vehicle_{vehicle_id}_{i+1:03d}.{ext}"
                    
                    # Fetch the raw bytes.
                    file_data = await self._download_image_data(image_url)
                    if file_data:
                        return {
                            'filename': filename,
                            'data': file_data,
                            'size_bytes': len(file_data),
                            'mime_type': f'image/{ext}',
                            'sort_order': i + 1,  # preserve the scrape order
                            'is_primary': i == 0   # first image is the primary one
                        }
                        
                except Exception as e:
                    logger.warning(f"下载图片失败: {image_url} - {e}")
                    return None
            
            # Deliberately low concurrency (2 in flight) for network stability.
            semaphore = asyncio.Semaphore(2)
            
            async def download_with_limit(i: int, image_info: Dict[str, Any]):
                async with semaphore:
                    # Small delay between requests to avoid anti-scraping triggers.
                    if i > 0:  # no delay before the very first image
                        await asyncio.sleep(0.5)  # 500ms spacing
                    return await download_single_image(i, image_info)
            
            logger.info(f"开始并发下载 {len(images)} 张图片")
            download_tasks = [download_with_limit(i, img) for i, img in enumerate(images)]
            download_results = await asyncio.gather(*download_tasks, return_exceptions=True)
            
            # Keep only successful payloads (drop None and raised exceptions).
            batch_files_data = []
            success_count = 0
            for result in download_results:
                if result and not isinstance(result, Exception):
                    batch_files_data.append(result)
                    success_count += 1
            
            logger.info(f"图片下载完成，成功 {success_count}/{len(images)} 张")
            
            if not batch_files_data:
                return 0
            
            # 5. Upload all downloaded files.
            uploaded_files = await self._batch_upload_files(
                files_data=batch_files_data,
                bucket=real_bucket_name,
                parent_id=folder_id
            )
            
            # 6. Link the uploaded files to this vehicle.
            await self._batch_create_file_relations(
                business_type="picknbuy24",
                business_id=vehicle_id,
                file_ids=[f.id for f in uploaded_files],
                relation_type="image"
            )
            
            logger.info(f"车辆 {vehicle_id} 成功保存了 {len(uploaded_files)} 张图片到 {real_bucket_name}/{folder_path}")
            return len(uploaded_files)
            
        except Exception as e:
            logger.error(f"批量保存车辆图片失败: {vehicle_id} - {e}")
            return 0
    
    @transactional
    async def _ensure_bucket_exists(self, bucket_name: str) -> str:
        """Ensure the storage bucket exists and return its name (cache-backed).

        The existence flag is cached for four hours to spare repeated lookups.
        """
        cache_key = f"bucket_exists:{bucket_name}"

        # Fast path: existence flag already cached.
        if await self.cache_service.get(cache_key):
            logger.debug(f"缓存命中存储桶: {bucket_name}")
            return bucket_name

        try:
            # Cache miss: ask the bucket service whether the bucket is there.
            found = await self.bucket_service.find_one_by_filters(bucket_name=bucket_name)
            if found:
                logger.info(f"使用已存在的桶: {bucket_name}")
                # Remember the result for 4 hours.
                await self.cache_service.set(cache_key, "exists", 14400)
                return bucket_name

            # Not found: create it under an explicit name.
            logger.info(f"创建新存储桶: {bucket_name}")
            from ...storage.models.api_models import BucketCreateDTO
            dto = BucketCreateDTO(
                display_name=bucket_name,
                bucket_name=bucket_name,  # pin the exact bucket name
                description=f"车辆图片存储桶: {bucket_name}"
            )
            created = await self.bucket_service.create_bucket(dto)
            logger.info(f"存储桶创建成功: {created.bucket_name}")

            await self.cache_service.set(cache_key, "exists", 14400)
            return created.bucket_name

        except Exception as e:
            logger.error(f"确保存储桶存在失败: {bucket_name} - {e}")
            raise
    
    @transactional
    async def _ensure_folder_exists(self, bucket_name: str, folder_path: str) -> int:
        """
        Ensure every folder on the path exists; return the final folder ID (cached).
        
        Args:
            bucket_name: bucket name
            folder_path: folder path, e.g. "picknbuy24/2024"
            
        Returns:
            ID of the deepest folder on the path.
        """
        # Cache key for the resolved folder ID.
        cache_key = f"folder_id:{bucket_name}:{folder_path}"
        
        # Fast path: resolved ID cached (4-hour TTL, set below).
        cached_folder_id = await self.cache_service.get(cache_key)
        if cached_folder_id:
            logger.debug(f"缓存命中文件夹: {folder_path} (ID: {cached_folder_id})")
            return int(cached_folder_id)
        
        # Cache miss: walk/create the folder chain segment by segment.
        logger.debug(f"缓存未命中，创建文件夹: {folder_path}")
        
        # Split the path into non-empty segments.
        path_parts = [p for p in folder_path.split('/') if p]
        current_parent_id = None
        
        for part in path_parts:
            try:
                # Look up this segment under the current parent.
                folders = await self.storage_service.find_by_filters(
                    bucket=bucket_name,
                    name=part,
                    is_dir=1,
                    parent_id=current_parent_id
                )
                folder = folders[0] if folders else None
                
                if folder:
                    # Duplicate folders exist: warn and use the first match.
                    if len(folders) > 1:
                        logger.warning(f"发现 {len(folders)} 个重复文件夹: {bucket_name}/{part}, 使用第一个 (ID: {folder.id})")
                    current_parent_id = folder.id
                    logger.debug(f"找到文件夹: {part} (ID: {folder.id})")
                else:
                    # Segment missing: create it.
                    logger.info(f"创建文件夹: {bucket_name}/{part}")
                    # NOTE(review): absolute "src." import here vs the relative
                    # "...storage" imports elsewhere — confirm both resolve to
                    # the same module.
                    from src.storage.models.api_models import FolderCreateDTO
                    folder_dto = FolderCreateDTO(
                        name=part,
                        bucket=bucket_name,
                        parent_id=str(current_parent_id) if current_parent_id else None
                    )
                    created_folder = await self.storage_service.create_folder(folder_dto)
                    current_parent_id = created_folder.id
                    logger.info(f"文件夹创建成功: {part} (ID: {created_folder.id})")
                    
            except Exception as e:
                logger.error(f"处理文件夹失败: {part} - {e}")
                raise
        
        # Cache the resolved ID (4-hour TTL).
        await self.cache_service.set(cache_key, str(current_parent_id), 14400)
        logger.debug(f"文件夹ID已缓存: {folder_path} -> {current_parent_id}")
        
        return current_parent_id
    
    def _validate_vehicle_year(self, year: str) -> str:
        """
        Normalise a raw vehicle-year string.
        
        Args:
            year: raw year text from the listing
            
        Returns:
            The year as a canonical integer string, or 'unknown' when the value
            is missing, unparsable, or outside 1900-2030.
        """
        if not year:
            return "unknown"

        try:
            parsed = int(year)
        except (ValueError, TypeError):
            logger.warning(f"年份格式无效: {year}，使用 unknown") 
            return "unknown"

        # Plausible manufacturing-year window.
        if 1900 <= parsed <= 2030:
            return str(parsed)

        logger.warning(f"年份超出合理范围: {year}，使用 unknown")
        return "unknown"
    
    async def _download_image_data(self, image_url: str) -> bytes:
        """
        Download one image via the framework's HttpxDownloader.
        
        Args:
            image_url: image URL
            
        Returns:
            Raw image bytes.
        
        Raises:
            Exception: on any download error or non-200 response (re-raised
            after logging).
        """
        from axiom_boot.scraper import Target
        from axiom_boot.scraper.downloaders.httpx_downloader import HttpxDownloader
        from axiom_boot.conf.manager import settings
        
        try:
            downloader = HttpxDownloader()
            # NOTE(review): reaches into a private attribute to inject settings
            # — confirm HttpxDownloader offers no public way to receive them.
            downloader._settings = settings
            
            target = Target(url=image_url, method="GET")
            response = await downloader.download(target)
            
            if response.status_code == 200:
                return response.content
            else:
                raise Exception(f"HTTP状态码错误: {response.status_code}")
                
        except Exception as e:
            logger.error(f"图片下载失败: {image_url} - {e}")
            raise
    
    async def _batch_upload_files(self, files_data: List[Dict[str, Any]], 
                                bucket: str, parent_id: int) -> List:
        """
        Upload prepared file payloads one by one.
        
        Args:
            files_data: payload dicts ('filename', 'data', 'mime_type',
                'size_bytes', optional 'sort_order'/'is_primary')
            bucket: target bucket name
            parent_id: target folder ID
            
        Returns:
            Storage objects for every successful upload; failed uploads are
            logged and skipped.
        """
        results = []

        for payload in files_data:
            try:
                # Wrap the in-memory bytes as the async stream upload_file expects.
                async def one_chunk():
                    yield payload['data']

                stored = await self.storage_service.upload_file(
                    filename=payload['filename'],
                    mime_type=payload['mime_type'],
                    size_bytes=payload['size_bytes'],
                    stream=one_chunk(),
                    bucket=bucket,
                    parent_id=parent_id
                )

                # Stash ordering / primary-image hints on the returned object.
                stored._sort_order = payload.get('sort_order', 0)
                stored._is_primary = payload.get('is_primary', False)
                results.append(stored)
                logger.debug(f"文件上传成功: {payload['filename']}")

            except Exception as e:
                logger.error(f"文件上传失败: {payload['filename']} - {e}")

        return results
    
    @transactional
    async def _batch_create_file_relations(self, business_type: str, business_id: str,
                                         file_ids: List[int], relation_type: str):
        """
        Link a set of stored files to one business entity.
        
        Args:
            business_type: business domain tag (e.g. "picknbuy24")
            business_id: owning entity ID
            file_ids: storage IDs to link
            relation_type: kind of relation (e.g. "image")
        
        Failures are logged per file and do not stop the remaining links.
        """
        for fid in file_ids:
            try:
                await self.file_relation_service.create_relation(
                    business_type=business_type,
                    business_id=business_id,
                    file_storage_id=fid,
                    relation_type=relation_type
                )
            except Exception as e:
                logger.error(f"创建文件关联失败: business_id={business_id}, file_id={fid} - {e}")
    
    @transactional 
    async def mark_vehicles_as_sold(self, current_ref_nos: set, task_id: str = None) -> int:
        """
        Mark vehicles missing from the current scrape batch as sold.
        
        Args:
            current_ref_nos: every ref_no seen in the current scrape batch
            task_id: task ID (log correlation only; currently unused)
            
        Returns:
            Number of vehicles newly marked as sold.
        """
        if not current_ref_nos:
            logger.warning("当前批次ref_nos为空，跳过售出标记")
            return 0
        
        logger.info(f"检测售出车辆：当前批次包含 {len(current_ref_nos)} 辆车")
        
        # All non-deleted vehicles that are not already marked sold.
        all_vehicles = await self.find_by_filters(is_deleted=0)
        active_vehicles = [v for v in all_vehicles if v.status_tag != 'sold']
        
        sold_count = 0
        sold_ref_nos = []
        
        for vehicle in active_vehicles:
            # A vehicle absent from the current batch has been delisted (sold).
            if vehicle.ref_no not in current_ref_nos:
                # Fix: pass the update as a dict — consistent with every other
                # update_by_pk call site in this service (the kwargs form used
                # here was the outlier).
                await self.update_by_pk(vehicle.id, {
                    'status_tag': 'sold',
                    'version': vehicle.version + 1
                })
                sold_count += 1
                sold_ref_nos.append(vehicle.ref_no)
        
        if sold_count > 0:
            logger.info(f"标记了 {sold_count} 辆车为售出状态: {sold_ref_nos[:10]}{'...' if len(sold_ref_nos) > 10 else ''}")
        else:
            logger.info("未发现新的售出车辆")
        
        return sold_count
    
    @transactional
    async def update_vehicle_pricing_only(self, ref_no: str, pricing_data: Dict[str, Any]) -> bool:
        """
        Update only the price-related fields of one vehicle (phase-two update).
        
        Args:
            ref_no: vehicle reference number
            pricing_data: scraped pricing payload
            
        Returns:
            True when at least one field was written, False otherwise
            (including "vehicle not found" and any error).
        """
        try:
            # Consistency: use the single-row lookup helper, as the batch
            # updater does, instead of find_by_filters()[0].
            existing_vehicle = await self.find_one_by_filters(ref_no=ref_no)
            if not existing_vehicle:
                logger.warning(f"未找到车辆 {ref_no}，跳过价格更新")
                return False
            
            # Collect whichever price fields the payload actually carries.
            update_fields = {}
            price_fields = ['currency', 'price', 'price_original', 'price_before_discount', 
                           'discount_amount', 'has_discount', 'status_tag']
            
            for field in price_fields:
                if field in pricing_data:
                    update_fields[field] = pricing_data[field]
            
            if update_fields:
                # Optimistic-lock bump.
                update_fields['version'] = existing_vehicle.version + 1
                # Fix: pass the dict positionally — consistent with the other
                # update_by_pk call sites (the **kwargs form was the outlier).
                await self.update_by_pk(existing_vehicle.id, update_fields)
                logger.debug(f"更新车辆 {ref_no} 的价格信息: {list(update_fields.keys())}")
                return True
            
            return False
            
        except Exception as e:
            logger.error(f"更新车辆 {ref_no} 价格失败: {e}")
            return False
    
    async def _get_existing_ref_nos_with_cache(self, ref_nos: List[str]) -> Set[str]:
        """
        Existence check for a batch of ref_nos — deliberately cache-free.

        Periodic re-scrape tasks must be able to back-fill missing fields on
        existing rows; a stale existence cache would interfere, so despite the
        method name this queries the database directly.
        """
        logger.debug(f"定期任务模式：直接查询数据库检查 {len(ref_nos)} 个车辆是否存在")

        if not ref_nos:
            return set()

        matches = await self.find_by_filters(ref_no__in=ref_nos)
        found = {m.ref_no for m in matches}
        logger.debug(f"数据库查询完成: 发现 {len(found)}/{len(ref_nos)} 个已存在车辆")
        return found
    
    async def invalidate_vehicle_cache(self, ref_no: str) -> None:
        """Drop the existence-cache entry for one vehicle (after delete/update)."""
        await self.cache_service.delete(f"vehicle_exists:{ref_no}")
    
    async def _build_existing_vehicles_cache(self) -> None:
        """
        Warm the vehicle-existence cache (cold start / before a weekly full scrape).

        Pages through every non-deleted vehicle and caches an "EXISTS" marker
        per ref_no with a two-hour TTL. Call sparingly — it touches every row.
        """
        key_prefix = "vehicle_exists:"
        ttl_seconds = 7200  # 2-hour TTL

        logger.info("开始构建车辆存在性缓存...")

        page_size = 1000
        page_start = 0
        cached_total = 0

        while True:
            page = await self.find_by_filters(
                is_deleted=0,
                limit=page_size,
                offset=page_start,
                order_by=["id"]
            )

            if not page:
                break

            # Mark every vehicle on this page as existing.
            for row in page:
                await self.cache_service.set(f"{key_prefix}{row.ref_no}", "EXISTS", ttl_seconds)
                cached_total += 1

            page_start += page_size
            logger.debug(f"已缓存 {cached_total} 条车辆记录...")

        logger.info(f"车辆存在性缓存构建完成，共缓存 {cached_total} 条记录")
    
    @transactional
    async def batch_update_vehicle_status(self, vehicle_ids: List[str], status_tag: str) -> int:
        """
        Batch-update the status of many vehicles.
        
        (The previous docstring claimed raw SQL was used; the implementation
        actually goes through the framework's batch_update_by_filters, chunked
        to keep each statement's IN clause small.)
        
        Args:
            vehicle_ids: vehicle ID list
            status_tag: status tag (normal, sale, new, sold)
            
        Returns:
            Number of vehicles successfully updated.
        """
        if not vehicle_ids:
            return 0
        
        logger.info(f"开始批量更新{len(vehicle_ids)}辆车状态为: {status_tag}")
        
        # Chunked framework batch update.
        updated_count = 0
        batch_size = 100  # chunk size, keeps each generated SQL statement short
        
        for i in range(0, len(vehicle_ids), batch_size):
            batch_ids = vehicle_ids[i:i + batch_size]
            
            try:
                # BaseService transparently proxies this to the mapper.
                batch_updated = await self.batch_update_by_filters(
                    filters={'id__in': batch_ids},
                    update_data={'status_tag': status_tag}
                )
                updated_count += batch_updated
                logger.debug(f"批次{i//batch_size + 1}: 更新{batch_updated}辆车")
                
            except Exception as e:
                logger.error(f"批量更新批次{i//batch_size + 1}失败: {e}")
                # This chunk failed; carry on with the next one.
                continue
        
        logger.info(f"批量更新车辆状态完成: {updated_count}/{len(vehicle_ids)} 辆车状态更新为 {status_tag}")
        return updated_count