# -*- coding: utf-8 -*-

import time

from bs4 import BeautifulSoup

from fire_control_spider.utils import ContentProcessor
from ..base_parser import BasePageParser


class ProductDetailParser(BasePageParser):
    """Product detail page parser for the 'cccf' site.

    Extracts the breadcrumb categories from request meta, derives the
    title from the last category segment, and collects every
    "下载查看" (download/view) anchor as file metadata for a later
    download step — no files are fetched here.
    """

    def __init__(self, spider):
        super().__init__(spider)
        # Site identifier used downstream to route/label items.
        self.site_name = 'cccf'

    def can_handle(self, response):
        """Return True when the response is a cccf product detail page."""
        return response.meta.get('page_type') == 'cccf_product_detail'

    def parse(self, response):
        """Parse a product detail page and yield one webpage item.

        The yielded item carries:
          - category: breadcrumb list forwarded via request meta
          - title: last category segment ('' when no categories)
          - main_files: list of {'name', 'full_url'} dicts, one per
            download link found (always present, may be empty)
          - publish_time / remark: placeholders kept for schema parity
        """
        start_time = time.time()

        self.logger.info(f"开始处理详情页 {response.url}")

        # Base item with the common webpage fields pre-filled.
        item = self.create_webpage_item(response)

        # Breadcrumb categories forwarded from the listing page.
        # NOTE(review): meta key is 'categorys' (sic) — set by the
        # listing parser; confirm before renaming.
        item['category'] = response.meta.get('categorys', [])
        title = item['category'][-1] if item['category'] else ''
        item['title'] = title

        # Parse once with lxml; only anchors containing the download
        # marker text are of interest.
        soup = BeautifulSoup(response.text, 'lxml')
        download_links = soup.find_all(
            'a', string=lambda text: text and '下载查看' in text)

        # Always present so consumers never need a key-existence check.
        item['main_files'] = []
        for i, link in enumerate(download_links, 1):
            href = link.get('href')
            if href:
                item['main_files'].append({
                    'name': f'{title}检验报告{i}',
                    'full_url': self.urljoin(response.url, href),
                })

        if item['main_files']:
            # Record file info only; downloading happens elsewhere.
            self.logger.info(f"📋 发现 {len(item['main_files'])} 个文件需要下载")
            for file_info in item['main_files']:
                self.logger.info(
                    f"  - {file_info['name']}: {file_info['full_url']}")

        # Detail pages expose no publish time; keep schema fields set.
        item['publish_time'] = ''
        item['remark'] = {}

        elapsed = time.time() - start_time
        self.logger.info(
            f"✅ 详情页处理完成: {response.url} (耗时: {elapsed:.2f}秒)")

        yield item
