# -*- coding: utf-8 -*-

import re

from bs4 import BeautifulSoup

from ..base_parser import BasePageParser


class NewsListParser(BasePageParser):
    """Parser for news list pages (``newslist.php`` and ``law.php``).

    Extracts news-detail links from a list page, attaches a
    breadcrumb-style category path derived from the URL's ``catid``
    query parameter, and follows pagination.
    """

    def can_handle(self, response):
        """Return True if *response* is a news or law list page."""
        return 'newslist.php' in response.url or "law.php" in response.url

    def parse(self, response):
        """Parse a news list page.

        Yields one detail-page request per extracted news link, then a
        request for the next list page if a pagination link exists.
        Yields nothing when no news links are found.
        """
        soup = BeautifulSoup(response.text, 'html.parser')

        # Collect absolute detail-page URLs from the list items.
        news_links = []
        for container in soup.select('.blog .detail-content li'):
            link_tag = container.find('a')
            if link_tag and link_tag.get('href'):
                link = link_tag['href'].strip()
                if link:
                    news_links.append(self.urljoin(response.url, link))

        if not news_links:
            return

        category_title = self._build_category(response.url)

        # One request per detail page.
        # BUG FIX: the original had `return` inside this loop, which ended the
        # generator after the FIRST request and made the pagination code below
        # unreachable.
        for detail_url in news_links:
            yield self.make_request(
                detail_url,
                callback=self.spider.parse,  # route back through the main spider
                meta={
                    'category_url': response.url,
                    'page_type': 'news_detail',
                    "category": category_title
                },
                use_selenium=False  # set to True if the page needs JS rendering
            )

        # Pagination: the current page is marked `.productPage li.this`;
        # the next page, if any, is the following <li> sibling's link.
        next_page_link = None
        pagination = soup.select_one('.productPage li.this')
        if pagination:
            next_item = pagination.find_next_sibling('li')
            if next_item:
                link_tag = next_item.find('a')
                if link_tag and link_tag.get('href'):
                    next_page_link = link_tag['href'].strip()
                    self.logger.info(f"找到下一页链接: {next_page_link}")

        if next_page_link:
            next_url = self.urljoin(response.url, next_page_link)
            yield self.make_request(
                next_url,
                callback=self.spider.parse,
                meta={'page_type': 'news_list'},
                use_selenium=False  # set to True if the page needs JS rendering
            )

    def _build_category(self, url):
        """Map a list-page URL to its breadcrumb category path.

        The path always starts with "首页"; subsequent entries are chosen
        from the ``catid`` query parameter.

        BUG FIX: the original referenced an undefined name ``cateid`` and
        attached the 57/65559 and 14/45 branches to the ``if catid_match:``
        test, so they either crashed (no catid) or never ran (catid
        present). They are now a proper dispatch on ``catid``.
        """
        catid_match = re.search(r'catid=(\d+)', url)
        category_title = ["首页"]

        if 'newslist.php' in url:
            if catid_match:
                catid = int(catid_match.group(1))
                if 65556 <= catid <= 65558:
                    category_title.append("市场前瞻")
                    if catid == 65556:
                        category_title.append("新产品")
                    elif catid == 65557:
                        category_title.append("新技术")
                    else:  # catid == 65558
                        category_title.append("新材料")
                elif catid in (57, 65559):
                    category_title.append('行业管理')
                    category_title.append("行业动态" if catid == 57 else "政策信息")
                elif catid in (14, 45):
                    category_title.append('消防科技')
                    category_title.append("消防科普" if catid == 14 else "科技资讯")
        elif 'law.php' in url:
            category_title.append('法规标准')
            if catid_match:
                # Only catid 80 gets a third level; other catids stay at
                # the section level (original behavior preserved).
                if int(catid_match.group(1)) == 80:
                    category_title.append('规范标准')
            else:
                category_title.append('法律法规')

        return category_title
