import requests
from bs4 import BeautifulSoup
import time
from datetime import datetime
from .base_crawler import BaseCrawler
import logging
from news_crawler.utils.news_parser import NewsParser

class BeijingGongzuodongtaiCrawler(BaseCrawler):
    """Crawler for Beijing municipal government "department news" (部门动态) pages.

    Fetches the paginated list pages, extracts article URLs, and parses each
    article with the shared NewsParser using the selector rules below.
    """

    # requests has NO default timeout: without one, a single stalled
    # connection would hang the entire crawl forever. (connect, read) secs.
    REQUEST_TIMEOUT = (10, 30)

    def __init__(self, source_config):
        """Initialize with the source config dict (must contain 'url').

        Args:
            source_config: configuration mapping consumed by BaseCrawler;
                this class reads source_config['url'] as the list base URL.
        """
        super().__init__(source_config)
        # One session so connections (keep-alive) are reused across pages.
        self.session = requests.Session()
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Connection': 'keep-alive'
        }

        # Selector/format rules handed to NewsParser.parse_news_page().
        self.news_config = {
            'title_selectors': ['h1.title'],
            'time_selectors': ['div.time', 'div.detail-time'],
            'content_selectors': ['div.TRS_Editor', 'div.article-content'],
            'time_formats': ['%Y-%m-%d']
        }

    def get_page_content(self, url):
        """Fetch a page and return its decoded HTML text, or None on failure.

        Any network/HTTP error is logged and swallowed (returns None) so a
        single bad page does not abort the whole crawl.
        """
        try:
            response = self.session.get(
                url, headers=self.headers, timeout=self.REQUEST_TIMEOUT
            )
            # Treat HTTP error pages (404/500/...) as failures instead of
            # feeding their bodies to the parsers.
            response.raise_for_status()
            response.encoding = 'utf-8'  # Beijing gov sites serve UTF-8
            return response.text
        except Exception as e:
            logging.error(f"获取页面失败 {url}: {str(e)}")
            return None

    def get_urls(self):
        """Collect article URLs from the first 40 list pages.

        Returns:
            list[str]: absolute article URLs (empty list on failure).
        """
        base_url = self.source_config['url']
        urls = []

        try:
            # Page 1 lives at the base URL itself (no index suffix).
            logging.info("正在获取第1页...")
            first_page = self.get_page_content(base_url)
            if first_page:
                first_page_urls = self.parse_list_page(first_page)
                logging.info(f"第1页获取到 {len(first_page_urls)} 个URL")
                urls.extend(first_page_urls)

            # Pages 2-40: the site numbers them index_1.html .. index_39.html.
            for page in range(1, 40):
                url = f'{base_url}/index_{page}.html'
                logging.info(f'正在获取第{page+1}页...')
                page_html = self.get_page_content(url)
                if page_html:
                    page_urls = self.parse_list_page(page_html)
                    logging.info(f"第{page+1}页获取到 {len(page_urls)} 个URL")
                    urls.extend(page_urls)
                time.sleep(1)  # throttle to avoid hammering the server

            logging.info(f"总共获取到 {len(urls)} 个URL")
            return urls
        except Exception as e:
            logging.error(f"获取URL列表失败: {str(e)}")
            return []

    def parse_list_page(self, html):
        """Extract article URLs from one list page's HTML.

        Args:
            html: raw HTML of a list page (may be None/empty).

        Returns:
            list[str]: absolute article URLs found on the page.
        """
        if not html:
            return []

        soup = BeautifulSoup(html, 'html.parser')
        urls = []

        try:
            # The department-news list uses the same <li class="col-md">
            # structure as the main news list.
            for item in soup.find_all('li', class_='col-md'):
                link = item.find('a')
                if link:
                    href = link.get('href', '')
                    if href:
                        # Normalize relative paths like './2024/...'.
                        if href.startswith('./'):
                            href = href[2:]
                        url = self.resolve_url(self.source_config['url'], href)
                        urls.append(url)
            return urls
        except Exception as e:
            logging.error(f"解析列表页失败: {str(e)}")
            return []

    def parse_page(self, url):
        """Fetch and parse one article page.

        Args:
            url: absolute URL of the article.

        Returns:
            dict | None: parsed article fields (with 'url' set) from
            NewsParser, or None if fetching/parsing failed.
        """
        logging.info(f'正在解析: {url}')
        html = self.get_page_content(url)
        if not html:
            return None

        try:
            result = NewsParser.parse_news_page(html, url, self.news_config)
            if result:
                result['url'] = url
                return result
            return None
        except Exception as e:
            logging.error(f"解析详情页失败 {url}: {str(e)}")
            return None
