"""
PicknBuy24车辆详情页提取器 - 基于实际页面完整提取
"""
import re
from typing import Dict, Any, List, Tuple
from bs4 import BeautifulSoup
from urllib.parse import urljoin

from axiom_boot.logging.setup import get_logger
from axiom_boot.scraper.interfaces import Extractor
from axiom_boot.scraper.models import Response, Target, Item

logger = get_logger(__name__)


class VehicleDetailItem(Item):
    """Pipeline item carrying the data extracted for one vehicle detail page."""
    # Full field dict built by VehicleDetailExtractor.extract (specs, price, images, ...)
    vehicle_data: Dict[str, Any]
    # Extraction outcome flag; defaults to True
    success: bool = True


class VehicleDetailExtractor(Extractor):
    """PicknBuy24车辆详情页提取器 - 完整字段提取"""

    def extract(self, response: Response, target: Target) -> List[Item]:
        """
        Extract full vehicle details from a PicknBuy24 detail page.

        Populates every field of the vehicle data model: identity, specs,
        dimensions, feature lists, status tag, price/discount breakdown and
        (optionally) image URLs. Image extraction is controlled by
        ``target.metadata["extract_images"]`` (defaults to True).

        Returns:
            List[VehicleDetailItem]: a single item on success, [] on failure.
        """
        try:
            soup = BeautifulSoup(response.text, "html.parser")
            
            logger.info(f"开始提取车辆详情: {response.url}")
            
            # Extract every field
            vehicle_data = {
                # Identity
                "ref_no": self._extract_ref_no(soup, response.url),
                "detail_url": response.url,
                "source": "picknbuy24",
                
                # Basic vehicle info
                "year": self._extract_year(soup),
                "make": self._extract_make(soup),
                "model": self._extract_model(soup),
                # Price fields are parsed later in this method
                "currency": "",
                "price": 0.0,
                "price_original": "",
                "price_before_discount": 0.0,
                "discount_amount": 0.0,
                "has_discount": 0,
                
                # Technical specs
                "mileage_km": self._extract_mileage_km(soup),
                "mileage_miles": self._extract_mileage_miles(soup),
                "engine_cc": self._extract_engine_cc(soup),
                "fuel_type": self._extract_fuel_type(soup),
                "transmission": self._extract_transmission(soup),
                "drivetrain": self._extract_drivetrain(soup),
                "steering": self._extract_steering(soup),
                
                # Body info
                "seats": self._extract_seats(soup),
                "doors": self._extract_doors(soup),
                "color": self._extract_color(soup),
                
                # Dimensions / weight
                "length_m": self._extract_length(soup),
                "width_m": self._extract_width(soup),
                "height_m": self._extract_height(soup),
                "weight_kg": self._extract_weight(soup),
                "cubic_meter": self._extract_cubic_meter(soup),
                
                # PicknBuy24-specific fields
                "chassis": self._extract_chassis(soup),
                "chassis_no_full": self._extract_chassis_no_full(soup),
                "reg_year_month": self._extract_reg_year_month(soup),
                "location": self._extract_location(soup),
                "engine_type": self._extract_engine_type(soup),
                "seatbelts_year": self._extract_seatbelts_year(soup),
                
                # Feature categories
                "exterior_options": self._extract_exterior_options(soup),
                "interior_options": self._extract_interior_options(soup),
                "safety_features": self._extract_safety_features(soup),
                "convenient_features": self._extract_convenient_features(soup),
                "multimedia_features": self._extract_multimedia_features(soup),
                "equipment_features": self._extract_equipment_features(soup),
                "remarkable_features": self._extract_remarkable_features(soup),
                
                # Listing status tag (sale / new / sold / normal)
                "status_tag": self._extract_status_tag(soup),
                
                # Placeholder; computed below
                "vehicle_type": "",
                
                # Images - only extracted when the crawl mode asks for them
                "images": self._extract_images(soup, response.url) if target.metadata.get("extract_images", True) else []
            }
            
            # Image count field
            if target.metadata.get("extract_images", True):
                # Images were extracted above; count them directly
                vehicle_data["image_count"] = len(vehicle_data["images"])
            else:
                # Images skipped, but the count is still needed for state management
                image_urls = self._extract_images(soup, response.url)
                vehicle_data["image_count"] = len(image_urls)
            
            # Layered vehicle-type inference
            vehicle_type, confidence = self._get_vehicle_type_with_confidence(vehicle_data, target)
            vehicle_data["vehicle_type"] = vehicle_type
            vehicle_data["vehicle_type_confidence"] = confidence
            
            # Price parsing - discount info is read straight from the detail page
            price_info = self._parse_price_with_discount_from_detail_page(soup)
            vehicle_data["currency"] = price_info["currency"]
            vehicle_data["price"] = price_info["price"]
            vehicle_data["price_original"] = price_info["price_original"]
            vehicle_data["price_before_discount"] = price_info["price_before_discount"]
            vehicle_data["discount_amount"] = price_info["discount_amount"]
            vehicle_data["has_discount"] = price_info["has_discount"]
            

            
            # Build the vehicle_id
            vehicle_data["vehicle_id"] = f"picknbuy24_{vehicle_data['ref_no']}"
            
            logger.info(f"车辆详情提取完成: {vehicle_data.get('ref_no', 'Unknown')} - 状态标签: {vehicle_data.get('status_tag', 'None')}")
            
            item = VehicleDetailItem(vehicle_data=vehicle_data, success=True)
            return [item]
            
        except Exception as e:
            logger.error(f"车辆详情提取失败: {e}")
            return []
    
    def _extract_ref_no(self, soup: BeautifulSoup, url: str) -> str:
        """提取参考号 - 从URL或页面"""
        match = re.search(r'refno=([^&]+)', url)
        if match:
            return match.group(1)
        
        ref_element = soup.find(text=re.compile(r'Ref\s*No', re.I))
        if ref_element:
            match = re.search(r'(\d+)', ref_element)
            if match:
                return match.group(1)
        return ""
    
    def _extract_year(self, soup: BeautifulSoup) -> str:
        """Extract the manufacture year from titles like '2008 DAIHATSU MOVE L'."""
        page_text = soup.get_text()

        # Progressively looser patterns: year+make+model, year+make,
        # finally any standalone four-digit number.
        candidate_patterns = (
            r'(\d{4})\s+[A-Z]+\s+[A-Z]',
            r'(\d{4})\s+[A-Z]+',
            r'\b(\d{4})\b',
        )
        for candidate_pattern in candidate_patterns:
            for candidate in re.findall(candidate_pattern, page_text):
                # Accept only plausible vehicle manufacture years
                if 1900 <= int(candidate) <= 2030:
                    return candidate
        return ""
    
    def _extract_make(self, soup: BeautifulSoup) -> str:
        """Extract the vehicle make from a 'YYYY MAKE Model' style title."""
        hit = re.search(r'\d{4}\s+([A-Z]+)\s+[A-Z]', soup.get_text())
        return hit.group(1) if hit else ""
    
    def _extract_model(self, soup: BeautifulSoup) -> str:
        """Extract the model name following the year and make in the title."""
        hit = re.search(r'\d{4}\s+[A-Z]+\s+([A-Z\s]+)', soup.get_text())
        return hit.group(1).strip() if hit else ""
    

    
    def _parse_number_with_commas(self, text: str) -> int:
        """解析带千分符的数字字符串为整数"""
        try:
            # 移除千分符和空格，只保留数字
            clean_text = re.sub(r'[,\s]', '', text)
            return int(clean_text) if clean_text.isdigit() else 0
        except:
            return 0
    
    def _extract_mileage_km(self, soup: BeautifulSoup) -> int:
        """Extract the odometer reading in kilometres as an int (0 if absent)."""
        km_pattern = re.compile(r'([\d,]+)\s*km', re.I)
        try:
            # Source 1: detail-page spec blocks, e.g. <div class="spec">115,000 km</div>
            for spec_div in soup.find_all('div', class_='spec'):
                spec_text = spec_div.get_text(strip=True)
                if 'km' in spec_text:
                    hit = km_pattern.search(spec_text)
                    if hit:
                        return self._parse_number_with_commas(hit.group(1))

            # Source 2: table cells such as "115,000 km<br>71,300 mile"
            for table in soup.find_all('table'):
                for cell in table.find_all('td'):
                    hit = km_pattern.search(cell.get_text(' ', strip=True))
                    if hit:
                        return self._parse_number_with_commas(hit.group(1))

            # Last resort: scan the whole page text
            hit = km_pattern.search(soup.get_text())
            return self._parse_number_with_commas(hit.group(1)) if hit else 0
        except Exception as e:
            logger.warning(f"提取里程失败: {e}")
            return 0
    
    def _extract_mileage_miles(self, soup: BeautifulSoup) -> int:
        """Extract the odometer reading in miles as an int (0 if absent)."""
        try:
            hit = re.search(r'([\d,]+)\s*mile', soup.get_text(), re.I)
            if not hit:
                return 0
            return self._parse_number_with_commas(hit.group(1))
        except Exception as e:
            logger.warning(f"提取英里数失败: {e}")
            return 0
    
    def _extract_engine_cc(self, soup: BeautifulSoup) -> int:
        """Extract engine displacement in cc as an int (0 if absent)."""
        try:
            hit = re.search(r'([\d,]+)\s*cc', soup.get_text(), re.I)
            if not hit:
                return 0
            return self._parse_number_with_commas(hit.group(1))
        except Exception as e:
            logger.warning(f"提取排量失败: {e}")
            return 0
    
    def _extract_fuel_type(self, soup: BeautifulSoup) -> str:
        """Extract fuel type by scanning the page for known fuel keywords."""
        page_text = soup.get_text()
        # Keep the original priority order when several keywords appear
        return next((fuel for fuel in ('Petrol', 'Diesel', 'Hybrid', 'Electric')
                     if fuel in page_text), "")
    
    def _extract_transmission(self, soup: BeautifulSoup) -> str:
        """Extract the transmission, e.g. 'MT ( F 5)' or a bare 'AT'."""
        # Parenthesized variants are listed first so they win over bare MT/AT
        hit = re.search(r'(MT\s*\([^)]+\)|AT\s*\([^)]+\)|MT|AT)', soup.get_text())
        return hit.group(1) if hit else ""
    
    def _extract_drivetrain(self, soup: BeautifulSoup) -> str:
        """Extract the drivetrain; 4WD takes precedence over 2WD."""
        page_text = soup.get_text()
        for drivetrain in ('4WD', '2WD'):
            if drivetrain in page_text:
                return drivetrain
        return ""
    
    def _extract_steering(self, soup: BeautifulSoup) -> str:
        """Extract the steering side ('Left'/'Right') from the Specs section."""
        hit = re.search(r'Steering\s*(Left|Right)', soup.get_text(), re.I)
        return hit.group(1) if hit else ""
    
    def _extract_seats(self, soup: BeautifulSoup) -> str:
        """Extract the seat count as a string, list-page table compatible.

        Tries, in order: table cells whose flattened text looks like '4 seats',
        a digit-only <span> whose following sibling text mentions 'seats', then
        plain-text patterns over the whole page. Returns "" when nothing matches.
        """
        try:
            # Method 1: seat info inside table cells (list-page HTML layout)
            tables = soup.find_all('table')
            for table in tables:
                tds = table.find_all('td')
                for td in tds:
                    td_text = td.get_text(strip=True)
                    # Matches the flattened form of: <span>4</span><br>seats
                    span_match = re.search(r'(\d+)\s*seats?', td_text, re.I)
                    if span_match:
                        return span_match.group(1)
            
            # Method 2: a digit-only <span> followed by 'seats' text
            spans = soup.find_all('span')
            for span in spans:
                span_text = span.get_text(strip=True)
                if span_text.isdigit():
                    # Check the sibling (or the parent's sibling) for the 'seats' label
                    next_text = ""
                    if span.next_sibling:
                        next_text = str(span.next_sibling)
                    elif span.parent and span.parent.next_sibling:
                        next_text = str(span.parent.next_sibling)
                    
                    if 'seats' in next_text.lower():
                        return span_text
            
            # Method 3: fall back to whole-page text matching
            text = soup.get_text()
            match = re.search(r'(\d+)\s*seats?', text, re.I)
            if match:
                return match.group(1)
            match = re.search(r'Seats\s*(\d+)', text, re.I)
            return match.group(1) if match else ""
            
        except Exception as e:
            logger.warning(f"提取座位数失败: {e}")
            return ""
    
    def _extract_doors(self, soup: BeautifulSoup) -> str:
        """Extract the door count as a string, list-page table compatible.

        Mirrors _extract_seats: table cells whose flattened text looks like
        '5 doors', then a digit-only <span> whose following sibling mentions
        'doors', then plain-text patterns. Returns "" when nothing matches.
        """
        try:
            # Method 1: door info inside table cells (list-page HTML layout)
            tables = soup.find_all('table')
            for table in tables:
                tds = table.find_all('td')
                for td in tds:
                    td_text = td.get_text(strip=True)
                    # Matches the flattened form of: <span>5</span><br>doors
                    span_match = re.search(r'(\d+)\s*doors?', td_text, re.I)
                    if span_match:
                        return span_match.group(1)
            
            # Method 2: a digit-only <span> followed by 'doors' text
            spans = soup.find_all('span')
            for span in spans:
                span_text = span.get_text(strip=True)
                if span_text.isdigit():
                    # Check the sibling (or the parent's sibling) for the 'doors' label
                    next_text = ""
                    if span.next_sibling:
                        next_text = str(span.next_sibling)
                    elif span.parent and span.parent.next_sibling:
                        next_text = str(span.parent.next_sibling)
                    
                    if 'doors' in next_text.lower():
                        return span_text
            
            # Method 3: fall back to whole-page text matching
            text = soup.get_text()
            match = re.search(r'(\d+)\s*doors?', text, re.I)
            if match:
                return match.group(1)
            match = re.search(r'Doors\s*(\d+)', text, re.I)
            return match.group(1) if match else ""
            
        except Exception as e:
            logger.warning(f"提取门数失败: {e}")
            return ""
    
    def _extract_color(self, soup: BeautifulSoup) -> str:
        """Extract the body colour from the Specs section."""
        hit = re.search(r'Colour\s*([A-Za-z\s]+?)(?:\s*Steering|\s*Cubic|\s*$)',
                        soup.get_text(), re.I)
        if not hit:
            return ""
        # Collapse internal whitespace runs to single spaces
        colour = re.sub(r'\s+', ' ', hit.group(1).strip())
        # Strip neighbouring spec labels the loose pattern may have captured
        colour = re.sub(r'\s*(Steering|Cubic meter|Right|Left).*$', '', colour, flags=re.I)
        return colour.strip()
    
    def _extract_length(self, soup: BeautifulSoup) -> str:
        """Extract the overall length string, e.g. '3.39 m'."""
        hit = re.search(r'Length\s*([\d.]+\s*m)', soup.get_text(), re.I)
        return hit.group(1) if hit else ""
    
    def _extract_width(self, soup: BeautifulSoup) -> str:
        """Extract the overall width string, e.g. '1.47 m'."""
        hit = re.search(r'Width\s*([\d.]+\s*m)', soup.get_text(), re.I)
        return hit.group(1) if hit else ""
    
    def _extract_height(self, soup: BeautifulSoup) -> str:
        """Extract the overall height string, e.g. '1.63 m'."""
        hit = re.search(r'Height\s*([\d.]+\s*m)', soup.get_text(), re.I)
        return hit.group(1) if hit else ""
    
    def _extract_weight(self, soup: BeautifulSoup) -> str:
        """Extract the vehicle weight string, e.g. '1,000 kg'."""
        hit = re.search(r'Weight\s*([\d,]+\s*kg)', soup.get_text(), re.I)
        return hit.group(1) if hit else ""
    
    def _extract_cubic_meter(self, soup: BeautifulSoup) -> str:
        """Extract the cubic-metre volume string, e.g. '7.8 m3'."""
        hit = re.search(r'Cubic meter\s*([\d.]+\s*m3)', soup.get_text(), re.I)
        return hit.group(1) if hit else ""
    
    def _extract_chassis(self, soup: BeautifulSoup) -> str:
        """Extract the chassis model code (e.g. 'DBA-L375S'), list-page compatible.

        Tries table cells first, then the value after a 'Chassis' label in the
        page text, then a bare pattern scan over the whole text. Returns ""
        when nothing matches.
        """
        try:
            # Method 1: chassis code inside table cells (list-page layout, e.g. DBA-L375S)
            tables = soup.find_all('table')
            for table in tables:
                tds = table.find_all('td')
                for td in tds:
                    td_text = td.get_text(strip=True)
                    # Chassis code shapes: DBA-L375S, GH-Z11, ...
                    chassis_match = re.search(r'\b([A-Z]{2,4}-[A-Z0-9]+)\b', td_text)
                    if chassis_match:
                        return chassis_match.group(1)
            
            # Method 2: value following a 'Chassis' label in the page text
            text = soup.get_text()
            match = re.search(r'Chassis\s*([A-Z]*-?[A-Z0-9]+)', text, re.I)
            if match:
                # Normalize whitespace inside the captured code
                chassis = match.group(1).strip()
                chassis = re.sub(r'\s+', ' ', chassis)  # collapse whitespace runs to one space
                chassis = chassis.replace(' -', '-').replace('- ', '-')  # drop spaces around hyphens
                return chassis
            
            # Method 3: bare pattern scan over the whole page text
            chassis_pattern = r'\b([A-Z]{2,4}-[A-Z0-9]+)\b'
            chassis_match = re.search(chassis_pattern, text)
            return chassis_match.group(1) if chassis_match else ""
            
        except Exception as e:
            logger.warning(f"提取底盘型号失败: {e}")
            return ""
    
    def _extract_chassis_no_full(self, soup: BeautifulSoup) -> str:
        """Extract the full chassis number, e.g. 'L175S-0180219'."""
        hit = re.search(r'Chassis No\s*([A-Z0-9-]+)', soup.get_text(), re.I)
        if not hit:
            return ""
        # Remove any whitespace that slipped into the captured value
        return re.sub(r'\s+', '', hit.group(1).strip())
    
    def _extract_reg_year_month(self, soup: BeautifulSoup) -> str:
        """Extract the registration year/month, e.g. '2008/Sep'."""
        hit = re.search(r'Reg\.\s*Year\s*(\d{4}/\w+)', soup.get_text(), re.I)
        return hit.group(1) if hit else ""
    
    def _extract_location(self, soup: BeautifulSoup) -> str:
        """Extract the stock location, e.g. 'Yokohama'."""
        hit = re.search(r'Location\s*([A-Za-z\s]+?)\s*-', soup.get_text(), re.I)
        return hit.group(1).strip() if hit else ""
    
    def _extract_engine_type(self, soup: BeautifulSoup) -> str:
        """Extract the engine type from the spec table or nearby text blocks.

        First scans two-cell spec-table rows for an 'engine ... type' label,
        then looks at the sibling of any element whose text matches
        'Engine ... Type'. Returns "" when nothing matches.
        """
        # Try the spec table first (falls back to the first table on the page)
        spec_table = soup.find('table', class_='spec-table') or soup.find('table')
        if spec_table:
            rows = spec_table.find_all('tr')
            for row in rows:
                cells = row.find_all(['td', 'th'])
                if len(cells) >= 2:
                    key = cells[0].get_text(strip=True).lower()
                    if 'engine' in key and 'type' in key:
                        return cells[1].get_text(strip=True)
        
        # Fallback: locate text blocks containing 'Engine ... Type'
        engine_divs = soup.find_all(['div', 'span', 'p'], string=re.compile(r'Engine.*Type', re.I))
        for div in engine_divs:
            # The value usually sits in the next sibling element
            next_sibling = div.find_next_sibling()
            if next_sibling:
                engine_text = next_sibling.get_text(strip=True)
                # Skip neighbouring spec labels that are clearly not engine types
                if engine_text and not re.match(r'^(Colour|Steering|Cubic)', engine_text, re.I):
                    return engine_text
                    
        return ""
    
    def _extract_seatbelts_year(self, soup: BeautifulSoup) -> str:
        """Extract the seatbelt year from the spec table or nearby text blocks.

        Scans two-cell spec-table rows for a 'seatbelt ... year' label and
        returns the first four-digit year found in the value (or the raw value
        when no year is present); falls back to sibling text of elements
        matching 'Seatbelt ... Year'. Returns "" when nothing matches.
        """
        # Try the spec table first (falls back to the first table on the page)
        spec_table = soup.find('table', class_='spec-table') or soup.find('table')
        if spec_table:
            rows = spec_table.find_all('tr')
            for row in rows:
                cells = row.find_all(['td', 'th'])
                if len(cells) >= 2:
                    key = cells[0].get_text(strip=True).lower()
                    if 'seatbelt' in key and 'year' in key:
                        year_text = cells[1].get_text(strip=True)
                        # Pull out a four-digit year if one is present
                        year_match = re.search(r'\b(19|20)\d{2}\b', year_text)
                        return year_match.group(0) if year_match else year_text
        
        # Fallback: search the surrounding text blocks
        seatbelt_divs = soup.find_all(['div', 'span', 'p'], string=re.compile(r'Seatbelt.*Year', re.I))
        for div in seatbelt_divs:
            next_sibling = div.find_next_sibling()
            if next_sibling:
                year_text = next_sibling.get_text(strip=True)
                year_match = re.search(r'\b(19|20)\d{2}\b', year_text)
                if year_match:
                    return year_match.group(0)
                    
        return ""
    
    def _extract_exterior_options(self, soup: BeautifulSoup) -> List[str]:
        """Extract features listed under the 'Exterior Options' section."""
        return self._extract_features_by_section(soup, "Exterior Options")
    
    def _extract_interior_options(self, soup: BeautifulSoup) -> List[str]:
        """Extract features listed under the 'Interior Options' section."""
        return self._extract_features_by_section(soup, "Interior Options")
    
    def _extract_safety_features(self, soup: BeautifulSoup) -> List[str]:
        """Extract features listed under the 'Safety' section."""
        return self._extract_features_by_section(soup, "Safety")
    
    def _extract_convenient_features(self, soup: BeautifulSoup) -> List[str]:
        """Extract features listed under the 'Convenient' section."""
        return self._extract_features_by_section(soup, "Convenient")
    
    def _extract_multimedia_features(self, soup: BeautifulSoup) -> List[str]:
        """Extract features listed under the 'Multimedia' section."""
        return self._extract_features_by_section(soup, "Multimedia")
    
    def _extract_equipment_features(self, soup: BeautifulSoup) -> List[str]:
        """Extract features listed under the 'Equipment' section."""
        return self._extract_features_by_section(soup, "Equipment")
    
    def _extract_features_by_section(self, soup: BeautifulSoup, section_name: str) -> List[str]:
        """Extract feature names for one section, on both list and detail pages.

        Resolution order:
          1. list-page 'carOption' container — returns ALL of its <li> items
             (section_name is not used on this path);
          2. detail-page 'optionBox' <dl> whose <dt> contains section_name,
             taking only the <dd class="active"> entries;
          3. any <li> whose text equals one of a few well-known feature names.
        Returns [] on error or when nothing matches.
        """
        features = []
        
        try:
            # Method 1: list-page carOption container
            car_option = soup.find('div', class_='carOption')
            if car_option:
                # Features live in a plain ul/li list here
                ul_list = car_option.find('ul')
                if ul_list:
                    li_items = ul_list.find_all('li')
                    for li in li_items:
                        feature_text = li.get_text(strip=True)
                        if feature_text and len(feature_text) > 1:
                            features.append(feature_text)

                    return features
            
            # Method 2: detail-page Features area
            option_box = soup.find('div', class_='optionBox')
            if option_box:
                # Locate the section container inside the optionBox
                option_block = option_box.find('div', class_='optionBlock')
                if option_block:
                    # Each section is a <dl> headed by a <dt>
                    dl_elements = option_block.find_all('dl')
                    
                    for dl in dl_elements:
                        # Does this dl belong to the requested section?
                        dt = dl.find('dt')
                        if dt and section_name in dt.get_text(strip=True):
                            # Matching section: collect only dd elements marked active
                            active_items = dl.find_all('dd', class_='active')
                            
                            for item in active_items:
                                text = item.get_text(strip=True)
                                if text and len(text) > 1:
                                    features.append(text)
                            break
            
            # Method 3: scan every li element as a last resort
            if not features:
                all_li = soup.find_all('li')
                common_features = ['Air Conditioning', 'Power Steering', 'Power Windows', 
                                 'Double Air Bag', 'Antilock Braking System', 'CD Player']
                
                for li in all_li:
                    li_text = li.get_text(strip=True)
                    if li_text in common_features:
                        features.append(li_text)
            
            return features
            
        except Exception as e:
            logger.warning(f"提取配置特性失败: {e}")
            return []
    
    def _extract_remarkable_features(self, soup: BeautifulSoup) -> List[str]:
        """Extract 'Remarkable Features' notes as a list of strings.

        Walks the siblings that follow the 'Remarkable Features' heading,
        harvesting structured list items when present and otherwise splitting
        free text on commas/semicolons/newlines. Deduplicates, filters out
        heading-like strings, and caps the result at 50 entries.
        """
        remarkable_features: List[str] = []

        def _is_new_feature(candidate: str) -> bool:
            # Accept short phrases that are not headings and not already seen
            return (
                len(candidate) > 1
                and candidate not in remarkable_features
                and not candidate.lower().startswith(('remarkable', 'features', 'specification'))
            )

        # bs4's `text=` keyword is deprecated; `string=` is the modern spelling
        section_element = soup.find(string=re.compile("Remarkable Features", re.I))
        if section_element and section_element.parent:
            # Walk the siblings that follow the heading's container
            current = section_element.parent.next_sibling
            while current:
                if hasattr(current, 'find_all'):
                    # Prefer structured list items (li/div/span)
                    list_items = current.find_all(['li', 'div', 'span'])

                    if list_items:
                        for item in list_items:
                            text = item.get_text(strip=True)
                            if text and _is_new_feature(text):
                                remarkable_features.append(text)
                    else:
                        # No list items: fall back to splitting plain text
                        text = current.get_text(strip=True)
                        if text and len(text) > 2:
                            if ',' in text or ';' in text or '\n' in text:
                                for feature in re.split(r'[;,\n]\s*', text):
                                    feature = feature.strip()
                                    if feature and _is_new_feature(feature):
                                        remarkable_features.append(feature)
                            elif _is_new_feature(text):
                                # Single feature with no delimiters
                                remarkable_features.append(text)

                current = current.next_sibling

        return remarkable_features[:50]  # cap the list at 50 entries
    
    def _extract_images(self, soup: BeautifulSoup, base_url: str) -> List[Dict[str, Any]]:
        """Extract full-size vehicle image links from the gallery section.

        Returns a list of dicts with 'url', 'type', 'index' and 'filename'
        keys. Falls back to scanning every <a> tag for large-image filename
        patterns when no gallery container is present.
        """
        def image_ext(url: str) -> str:
            # Map the URL's extension to a filename suffix, defaulting to .jpg
            lowered = url.lower()
            for candidate in (".png", ".jpeg", ".webp"):
                if candidate in lowered:
                    return candidate
            return ".jpg"

        images: List[Dict[str, Any]] = []

        # Locate the gallery container
        gallery = soup.find("div", class_="gallery")
        logger.info(f"查找gallery区域: {'找到' if gallery else '未找到'}")

        if gallery:
            figures = gallery.find_all("figure")
            logger.info(f"在gallery中找到{len(figures)}个figure元素")

            for i, figure in enumerate(figures):
                # The <a> href carries the large-image URL
                link = figure.find("a")
                if link and link.get("href"):
                    large_img_url = link.get("href")
                    logger.info(f"提取大图链接 {i+1}: {large_img_url}")

                    images.append({
                        "url": urljoin(base_url, large_img_url),
                        "type": "large",
                        "index": i + 1,
                        "filename": f"vehicle_{i+1:02d}{image_ext(large_img_url)}",
                    })

        # Fallback: no gallery found — scan all <a> hrefs for large-image name patterns
        if not images:
            logger.info("gallery方式未找到图片，尝试回退方式")
            links = soup.find_all("a", href=True)
            logger.info(f"找到{len(links)}个<a>标签")

            for link in links:
                href = link.get("href")
                if href and any(marker in href.lower() for marker in ("_l3.jpg", "_l.jpg", "_large.jpg")):
                    logger.info(f"回退方式找到图片: {href}")

                    images.append({
                        "url": urljoin(base_url, href),
                        "type": "large",
                        "index": len(images) + 1,
                        "filename": f"fallback_{len(images)+1:02d}{image_ext(href)}",
                    })

        logger.info(f"最终提取到{len(images)}张图片")
        return images
    
    def _extract_status_tag(self, soup: BeautifulSoup) -> str:
        """Extract the listing status tag from PicknBuy24-specific icon markup.

        Returns one of 'sale', 'new', 'sold' or 'normal'.
        """
        try:
            # <span class="iconSale"></span> marks discounted listings
            if soup.find('span', class_='iconSale') is not None:
                logger.debug("发现iconSale标签")
                return "sale"

            # <span class="iconNew"></span> marks fresh listings
            if soup.find('span', class_='iconNew') is not None:
                logger.debug("发现iconNew标签")
                return "new"

            # <img src="/images/stock/soldout.gif" id="soldout"> marks sold cars
            soldout_img = soup.find('img', id='soldout')
            if soldout_img is not None and 'soldout' in soldout_img.get('src', '').lower():
                logger.debug("发现soldout.gif图片")
                return "sold"

            # Legacy variant kept for compatibility: <span class="iconSold"></span>
            if soup.find('span', class_='iconSold') is not None:
                logger.debug("发现iconSold标签")
                return "sold"

            # No icon markup at all: treat as a normal listing
            logger.debug("未找到任何icon状态标签，返回normal")
            return "normal"

        except Exception as e:
            logger.warning(f"提取商品状态标签失败: {e}")
            return "normal"
    
    def _infer_vehicle_type(self, vehicle_data: Dict[str, Any]) -> str:
        """根据车辆信息智能推断车辆类型"""
        try:
            make = vehicle_data.get("make", "").upper()
            model = vehicle_data.get("model", "").upper()
            
            # 基于品牌和型号的规则推断
            model_lower = model.lower()
            
            # SUV关键词
            suv_keywords = ['suv', 'x-trail', 'forester', 'outback', 'cr-v', 'hr-v', 'rav4', 
                           'prado', 'pajero', 'delica', 'escudo', 'jimny', 'crossover',
                           'harrier', 'vanguard', 'kluger', 'highlander', 'cx-5', 'cx-7']
            
            # WAGON关键词  
            wagon_keywords = ['wagon', 'touring', 'estate', 'legacy', 'levorg', 'caldina',
                             'wingroad', 'stagea', 'accord wagon', 'atenza wagon']
            
            # COMPACT关键词
            compact_keywords = ['march', 'note', 'fit', 'vitz', 'yaris', 'swift', 'alto', 
                               'mira', 'move', 'tanto', 'daihatsu', 'wagon r', 'life',
                               'coo', 'boon', 'passo', 'ist', 'bb']
            
            # TRUCK关键词
            truck_keywords = ['truck', 'hiace', 'titan', 'dyna', 'canter', 'ranger',
                             'hilux', 'navara', 'carry', 'acty', 'mini truck']
            
            # VAN关键词
            van_keywords = ['van', 'serena', 'noah', 'voxy', 'stepwagon', 'freed',
                           'sienta', 'wish', 'stream', 'presage', 'lafesta']
            
            # SPORTY关键词
            sporty_keywords = ['sport', 'gtr', 'skyline', 'fairlady', 'rx-7', 'rx-8',
                              'mx-5', 'roadster', 'coupe', 'convertible', 'impreza sti',
                              'lancer evolution', 'supra', 'mr2', '86', 'brz']
            
            # 检查关键词匹配
            if any(keyword in model_lower for keyword in suv_keywords):
                return "SUV"
            elif any(keyword in model_lower for keyword in wagon_keywords):
                return "WAGON"
            elif any(keyword in model_lower for keyword in compact_keywords):
                return "COMPACT"
            elif any(keyword in model_lower for keyword in truck_keywords):
                return "TRUCK"
            elif any(keyword in model_lower for keyword in van_keywords):
                return "VAN"
            elif any(keyword in model_lower for keyword in sporty_keywords):
                return "SPORTY"
            
            # 基于车身规格推断
            seats = vehicle_data.get("seats", "")
            doors = vehicle_data.get("doors", "")
            engine_cc = vehicle_data.get("engine_cc", "")
            
            # 提取数字
            try:
                seats_num = int(re.search(r'\d+', seats).group()) if seats and re.search(r'\d+', seats) else 0
                doors_num = int(re.search(r'\d+', doors).group()) if doors and re.search(r'\d+', doors) else 0
                engine_num = int(re.search(r'\d+', engine_cc).group()) if engine_cc and re.search(r'\d+', engine_cc) else 0
            except:
                seats_num = doors_num = engine_num = 0
            
            # 基于规格推断
            if seats_num >= 7:  # 7座以上通常是VAN
                return "VAN"
            elif engine_num <= 660:  # 660cc以下通常是COMPACT(轻自动车)
                return "COMPACT"
            elif doors_num == 2:  # 2门通常是SPORTY
                return "SPORTY"
            elif seats_num <= 4 and doors_num <= 3:  # 小型车
                return "COMPACT"
            
            # 默认返回SEDAN
            return "SEDAN"
            
        except Exception as e:
            logger.warning(f"推断车辆类型失败: {e}")
            return ""
    
    def _parse_numeric_price(self, price_text: str):
        """解析价格文本为数字"""
        try:
            if not price_text:
                return None
            # 移除货币符号和逗号，提取数字
            import re
            from decimal import Decimal
            numeric_str = re.sub(r'[^\d.]', '', price_text)
            if numeric_str:
                return Decimal(numeric_str)
            return None
        except Exception:
            return None
    
    def _parse_numeric_discount(self, discount_text: str):
        """解析折扣金额文本为数字"""
        try:
            if not discount_text:
                return None
            # 移除符号和货币标记，提取数字
            import re
            from decimal import Decimal
            numeric_str = re.sub(r'[^\d.]', '', discount_text)
            if numeric_str:
                return Decimal(numeric_str)
            return None
        except Exception:
            return None
    
    def _parse_price_with_discount_from_detail_page(self, soup: BeautifulSoup) -> Dict[str, Any]:
        """Parse current price, pre-discount price and discount amount.

        Expected markup inside <div class="carPrice">:
            current price : <span class="fob">US$ <span id="fobPrice">1,730</span></span>
            original price: <span class="original">US$ <span id="oriPrice">1,760</span></span>
            discount      : <span class="clearance">-US $ 30</span>

        Returns:
            Dict with keys currency, price, price_original,
            price_before_discount, discount_amount, has_discount (0/1).
            Numeric values are Decimal or None. On any failure the defaults
            are returned (prices None, has_discount 0).
        """
        result: Dict[str, Any] = {
            "currency": "USD",
            "price": None,
            "price_original": "",
            "price_before_discount": None,
            "discount_amount": None,
            "has_discount": 0
        }

        try:
            # All price fragments live under <div class="carPrice">.
            car_price_div = soup.find('div', class_='carPrice')
            if not car_price_div:
                logger.warning("未找到价格容器 div.carPrice")
                return result

            # 1. Current (FOB) price.
            fob_span = car_price_div.find('span', class_='fob')
            if fob_span:
                fob_price_span = fob_span.find('span', id='fobPrice')
                if fob_price_span:
                    current_price_text = fob_price_span.get_text(strip=True)
                    result["price"] = self._parse_numeric_price(f"US$ {current_price_text}")
                    result["price_original"] = f"US$ {current_price_text}"

            # 2. Pre-discount ("original") price; its presence marks a discount.
            original_span = car_price_div.find('span', class_='original')
            if original_span:
                logger.debug(f"找到原价span: {original_span}")
                ori_price_span = original_span.find('span', id='oriPrice')
                if ori_price_span:
                    original_price_text = ori_price_span.get_text(strip=True)
                    result["price_before_discount"] = self._parse_numeric_price(f"US$ {original_price_text}")
                    result["has_discount"] = 1
                else:
                    logger.debug("未找到id='oriPrice'的span")
            else:
                logger.debug("未找到class='original'的span")

            # 3. Discount amount, e.g. "-US $ 30"; also implies a discount.
            clearance_span = car_price_div.find('span', class_='clearance')
            if clearance_span:
                discount_text = clearance_span.get_text(strip=True)
                result["discount_amount"] = self._parse_numeric_discount(discount_text)
                result["has_discount"] = 1
            else:
                logger.debug("未找到class='clearance'的span")

            # NOTE: the original code re-assigned price_original to itself when
            # no discount was found — a no-op, removed.
            logger.info(f"价格解析完成 - 现价: {result['price']}, 原价: {result['price_before_discount']}, 折扣: {result['discount_amount']}, 有折扣: {bool(result['has_discount'])}")

        except Exception as e:
            logger.error(f"解析详情页价格失败: {e}")

        return result
    
    def _get_vehicle_type_with_confidence(self, vehicle_data: Dict[str, Any], target: Target) -> Tuple[str, str]:
        """Infer the vehicle type in layers and report a confidence level.

        Layers, most to least reliable:
            1. type carried in target.metadata (from the listing URL) -> HIGH
            2. precise model-keyword matching                         -> MEDIUM
            3. seat/door/engine spec heuristics                       -> LOW
            4. brand tendency                                         -> LOW

        Returns:
            (vehicle_type, confidence); ("UNKNOWN", "UNKNOWN") when no layer
            can decide or an unexpected error occurs.
        """

        def first_int(text: str) -> int:
            # First integer embedded in *text*; 0 when absent/non-numeric.
            match = re.search(r'\d+', text) if text else None
            return int(match.group()) if match else 0

        try:
            # Layer 1: type propagated from the URL / listing page (most accurate).
            url_type = target.metadata.get("vehicle_type", "")
            if url_type and url_type in ['SEDAN', 'SUV', 'WAGON', 'COMPACT', 'SPORTY', 'TRUCK', 'VAN', 'SPECIAL']:
                logger.debug(f"从URL/列表页获取车辆类型: {url_type} (HIGH置信度)")
                return url_type, "HIGH"

            # Layer 2: exact model-keyword matching (medium accuracy).
            make = vehicle_data.get("make", "").upper()
            model_lower = vehicle_data.get("model", "").upper().lower()

            precise_mappings = {
                # SUV - unambiguous SUV models
                'SUV': ['x-trail', 'forester', 'outback', 'cr-v', 'hr-v', 'rav4', 'prado', 'pajero',
                       'delica d:5', 'escudo', 'jimny', 'harrier', 'vanguard', 'kluger', 'highlander',
                       'cx-5', 'cx-7', 'cx-3', 'vezel'],

                # WAGON - estate / touring bodies
                'WAGON': ['touring', 'estate', 'legacy touring wagon', 'levorg', 'caldina',
                         'wingroad', 'stagea', 'accord wagon', 'atenza wagon', 'avenir'],

                # COMPACT - kei cars and small hatchbacks
                'COMPACT': ['march', 'note', 'fit', 'vitz', 'yaris', 'swift', 'alto', 'mira',
                           'move', 'tanto', 'wagon r', 'life', 'coo', 'boon', 'passo', 'ist', 'bb',
                           'palette', 'lapin', 'spacia', 'n-box', 'n-one', 'n-wgn'],

                # TRUCK - cargo trucks
                'TRUCK': ['truck', 'hiace truck', 'titan', 'dyna', 'canter', 'ranger',
                         'hilux', 'navara', 'carry truck', 'acty truck', 'mini truck'],

                # VAN - MPVs / commercial vans
                'VAN': ['serena', 'noah', 'voxy', 'stepwagon', 'freed', 'sienta', 'wish',
                       'stream', 'presage', 'lafesta', 'hiace van', 'nv200', 'elgrand'],

                # SPORTY - sports cars
                'SPORTY': ['gtr', 'gt-r', 'skyline coupe', 'fairlady', 'rx-7', 'rx-8',
                          'mx-5', 'roadster', 'coupe', 'convertible', 'impreza sti',
                          'lancer evolution', 'supra', 'mr2', '86', 'brz', 'nsx']
            }

            for vehicle_type, keywords in precise_mappings.items():
                if any(keyword in model_lower for keyword in keywords):
                    logger.debug(f"关键词匹配车辆类型: {vehicle_type} (MEDIUM置信度)")
                    return vehicle_type, "MEDIUM"

            # Layer 3: spec-based heuristics (lower accuracy).
            # FIX: the original used a bare `except:` around the int parsing
            # and ran each regex twice; first_int() handles both concerns.
            seats_num = first_int(vehicle_data.get("seats", ""))
            doors_num = first_int(vehicle_data.get("doors", ""))
            engine_num = first_int(vehicle_data.get("engine_cc", ""))

            if engine_num <= 660 and engine_num > 0:  # Japanese kei-car displacement
                logger.debug(f"规格推断车辆类型: COMPACT (轻自动车 {engine_num}cc) (LOW置信度)")
                return "COMPACT", "LOW"
            elif seats_num >= 7:  # 7+ seats
                logger.debug(f"规格推断车辆类型: VAN ({seats_num}座) (LOW置信度)")
                return "VAN", "LOW"
            elif doors_num == 2:  # 2-door body
                logger.debug(f"规格推断车辆类型: SPORTY (2门) (LOW置信度)")
                return "SPORTY", "LOW"

            # Layer 4: brand tendency (lowest accuracy).
            brand_tendencies = {
                'DAIHATSU': 'COMPACT',  # Daihatsu builds mostly kei cars
                'SUZUKI': 'COMPACT',    # Suzuki likewise
                'SUBARU': 'SEDAN',      # Subaru is sedan-centric
                'LEXUS': 'SEDAN',       # Lexus luxury sedans
            }

            if make in brand_tendencies:
                inferred_type = brand_tendencies[make]
                logger.debug(f"品牌倾向推断车辆类型: {inferred_type} (LOW置信度)")
                return inferred_type, "LOW"

            # No layer could decide.
            logger.debug("无法确定车辆类型")
            return "UNKNOWN", "UNKNOWN"

        except Exception as e:
            logger.warning(f"车辆类型推断失败: {e}")
            return "UNKNOWN", "UNKNOWN"
    

    

    
    def _extract_vehicle_type_from_sidebar(self, soup: BeautifulSoup) -> str:
        """Extract the vehicle type from the sidebar "Search by Type" section.

        Locates the section heading, then scans its enclosing container for
        type links (href containing "type=") and, failing that, for static
        text items naming a known type. "SPECIAL VEHICLES" is normalized to
        "SPECIAL". Returns "" when nothing matches or on error.
        """
        try:
            # `string=` replaces bs4's deprecated `text=` keyword alias.
            type_section = soup.find(string=re.compile(r'Search\s+by\s+Type', re.IGNORECASE))
            if not type_section:
                return ""

            # Container holding the list of type entries.
            type_container = type_section.find_parent(['div', 'section', 'ul'])
            if not type_container:
                return ""

            valid_types = ['SEDAN', 'SUV', 'WAGON', 'COMPACT', 'SPORTY', 'TRUCK', 'VAN', 'SPECIAL']

            # Preferred source: links whose href carries a type= query parameter.
            for link in type_container.find_all('a', href=True):
                href = link.get('href', '')
                text = link.get_text(strip=True)
                if 'type=' in href.lower():
                    # Drop trailing counts like "(123)" before comparing.
                    type_name = re.sub(r'\s*\(\d+\)\s*', '', text).strip().upper()
                    if type_name in valid_types or type_name == 'SPECIAL VEHICLES':
                        return 'SPECIAL' if type_name == 'SPECIAL VEHICLES' else type_name

            # Fallback: static text nodes naming a known type.
            type_items = type_container.find_all(string=re.compile(r'(Sedan|SUV|Wagon|Compact|Sporty|Truck|Van|Special)', re.IGNORECASE))
            for item in type_items:
                type_name = re.sub(r'\s*\(\d+\)\s*', '', item.strip()).upper()
                if type_name in valid_types:
                    return type_name

            return ""

        except Exception as e:
            logger.warning(f"从侧边栏提取车辆类型失败: {e}")
            return ""