import scrapy
import logging
from ..items import MofScraperItem

class MofSpider(scrapy.Spider):
    """Spider for the Chinese Ministry of Finance site (mof.gov.cn).

    Crawls the homepage and a fixed set of news-list pages, follows
    article links, and extracts title, publish date, body text, source
    and URL into a ``MofScraperItem``.
    """

    name = 'mof'
    allowed_domains = ['mof.gov.cn']
    start_urls = ['https://www.mof.gov.cn/index.htm']

    # Known news listing pages crawled in addition to links found on the homepage.
    NEWS_LIST_URLS = [
        'https://www.mof.gov.cn/zhengwuxinxi/caizhengxinwen/',
        'https://www.mof.gov.cn/zhengwuxinxi/zhengcefabu/',
        'https://www.mof.gov.cn/zhengwuxinxi/zhengcejiedu/',
    ]

    @staticmethod
    def _is_article_link(link):
        """Return True for hrefs that look like article pages (not JS stubs)."""
        return bool(link) and '.htm' in link and not link.startswith('javascript')

    def _follow_detail_links(self, response, links):
        """Yield a detail-page request for each usable href.

        ``response.follow()`` applies ``urljoin()`` internally, which
        correctly resolves ``./``-, ``../``-, ``/``-rooted and absolute
        links — this replaces the hand-rolled string normalization that
        was previously duplicated in three places.
        """
        for link in links:
            # Skip empty hrefs and javascript: pseudo-links everywhere,
            # not only in the generic-fallback path.
            if not link or link.startswith('javascript'):
                continue
            yield response.follow(link, callback=self.parse_detail)

    def parse(self, response):
        """Parse the homepage: follow article links and queue list pages."""
        self.logger.info('正在爬取页面: %s', response.url)

        # Primary news/notice boxes; fall back to styled anchors, then to a
        # filtered scan of every anchor on the page.
        news_links = response.css(
            'div.xwzx_box a::attr(href), div.tzgg_box a::attr(href)').getall()
        if not news_links:
            news_links = response.css('a.bt::attr(href), a.bt1::attr(href)').getall()
        if not news_links:
            news_links = [
                link for link in response.css('a::attr(href)').getall()
                if self._is_article_link(link)
            ]

        yield from self._follow_detail_links(response, news_links)

        # Additional content sections (information disclosure, policy
        # release and policy interpretation boxes).
        for section in response.css('div.xxgk_box, div.zcfb_box, div.zcjd_box'):
            yield from self._follow_detail_links(
                response, section.css('a::attr(href)').getall())

        # Queue the known news-list pages for pagination crawling.
        for list_url in self.NEWS_LIST_URLS:
            yield response.follow(list_url, callback=self.parse_list)

    def parse_list(self, response):
        """Parse a news-list page: follow each article and the next page."""
        self.logger.info('正在爬取列表页: %s', response.url)

        news_links = response.css(
            'div.xwzx_ml a::attr(href), table.zctb a::attr(href)').getall()
        if not news_links:
            news_links = [
                link for link in response.css('a::attr(href)').getall()
                if self._is_article_link(link)
            ]

        yield from self._follow_detail_links(response, news_links)

        # BUG FIX: cssselect does not support the jQuery-only ':contains()'
        # pseudo-class, so the original 'a:contains("下一页")' CSS selector
        # raised a selector error at runtime and pagination never ran.
        # Match the link text with XPath instead, then fall back to the
        # class-based selector. response.follow() resolves relative hrefs.
        next_page = (
            response.xpath('//a[contains(text(), "下一页")]/@href').get()
            or response.css('a.next::attr(href)').get()
        )
        if next_page:
            yield response.follow(next_page, callback=self.parse_list)

    def parse_detail(self, response):
        """Extract an article page into a ``MofScraperItem``.

        Fields: title, publish_date (YYYY-MM-DD), content, source, url.
        Each field tries several selectors because mof.gov.cn serves
        multiple page templates (xilan, the_title, TRS editor, ...).
        """
        self.logger.info('正在解析详情页: %s', response.url)

        item = MofScraperItem()

        # Title: template-specific headers first, then generic fallbacks.
        item['title'] = (
            response.css('div.xilan_tit::text, div.the_title::text').get()
            or response.css('h2::text').get()
            or response.xpath('//title/text()').get()
        )
        if item['title']:
            item['title'] = item['title'].strip()

        # Publish date in YYYY-MM-DD form from the article header strip.
        date_pattern = r'\d{4}-\d{2}-\d{2}'
        item['publish_date'] = (
            response.css(
                'div.xilan_titf::text, div.the_title_f::text').re_first(date_pattern)
            or response.css('div.pages-date::text').re_first(date_pattern)
        )

        # Body text: try each known content container until one yields
        # non-whitespace text (whitespace-only matches no longer short-circuit
        # the remaining selectors).
        content_selectors = (
            'div#zoom p::text, div#zoom p *::text',
            'div.TRS_Editor p::text, div.TRS_Editor p *::text',
            'div.xilan_con p::text, div.xilan_con p *::text',
            'div.the_content p::text, div.the_content p *::text',
        )
        item['content'] = ''
        for selector in content_selectors:
            parts = [p.strip() for p in response.css(selector).getall() if p.strip()]
            if parts:
                item['content'] = '\n'.join(parts)
                break

        # Source ("来源: ..."): regex capture first, split-based fallback second.
        source_pattern = r'来源[:：]\s*([^\s,，]+)'
        item['source'] = response.css(
            'div.xilan_titf::text, div.the_title_f::text').re_first(source_pattern) or ''
        if not item['source']:
            header_text = response.css(
                'div.xilan_titf::text, div.the_title_f::text').get() or ''
            if '来源' in header_text:
                item['source'] = header_text.split('来源')[-1].strip('：: ')

        item['url'] = response.url
        return item