import os

import scrapy

from spiders.items import CeiItem


class CeiSpider(scrapy.Spider):
    """Crawl cei.cn high-search results for the keyword '税' (tax).

    Paginates through the search endpoint via POSTed form data, follows
    each result link, and yields a ``CeiItem`` per article with url,
    title, publication time, source and cleaned body text.
    """

    name = "cei"
    start_urls = ['https://www.cei.cn/d/search/highSearch.action?cursor=0']

    # Remote endpoint returns 10 results per page; the cursor advances by this step.
    PAGE_SIZE = 10
    # Stop paginating once the page counter exceeds this value.
    MAX_PAGE = 7200

    # Per-spider settings: export pipeline, rotating UA/proxy middlewares.
    custom_settings = {
        "ITEM_PIPELINES": {
            "spiders.pipelines.CeiExcelExportPipeline": 1,
        },
        "DOWNLOADER_MIDDLEWARES": {
            "spiders.middlewares.RandomUserAgentMiddleware": 101,
            "spiders.middlewares.ProxyMiddleware": 100,
        },
        'DOWNLOAD_DELAY': 1,  # 1-second delay between downloads
        'COOKIES_ENABLED': True,
        'CONCURRENT_REQUESTS': 16,  # cap on total concurrent requests
        # Persist scheduler/dedup state so an interrupted crawl can resume.
        'JOBDIR': os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'jobdir', 'cei_jobdir'))
    }

    def _search_request(self, page, cursor):
        """Build the POST request for one page of search results.

        Centralizes the form payload that was previously duplicated in
        start_requests() and parse().
        """
        url = f'https://www.cei.cn/d/search/highSearch.action?cursor={cursor}'
        return scrapy.FormRequest(
            url,
            formdata={
                'keywords': '税',
                'pageSize': '10',
                'pages': '9574',
                'url': '?cursor=',
                'activePage': str(page),
                'p': str(page),
            },
            callback=self.parse,
            meta={'page': page, 'cursor': cursor},
        )

    def start_requests(self):
        # Kick off the crawl at the first results page (page 0, cursor 0).
        yield self._search_request(0, 0)

    def parse(self, response):
        """Follow every result link on this page, then queue the next page."""
        for link in response.css('.search_list li a::attr(href)').extract():
            yield scrapy.Request(link, callback=self.parse_details)

        next_page = response.meta.get('page') + 1
        next_cursor = response.meta.get('cursor') + self.PAGE_SIZE
        # Spider logger (not print) so output honors Scrapy's LOG_LEVEL.
        self.logger.debug("next_page advanced to %s, url cursor=%s", next_page, next_cursor)
        if next_page > self.MAX_PAGE:  # stop once past the last known page
            return

        yield self._search_request(next_page, next_cursor)

    def parse_details(self, response):
        """Extract one article's title, date, source and body into a CeiItem."""
        title = self.clean_text(response.css('.xx_con_tile h2::text').extract_first())
        content = self.clean_text(''.join(response.css('#content *::text').extract()))

        # Skip pages that yielded neither a title nor any body text.
        if not title and not content:
            return

        item = CeiItem()
        item['url'] = response.url
        item['title'] = title

        # Raw strings so \d is a regex escape, not an invalid string escape.
        time_matches = response.css('.xx_con_tile .t_l::text').re(r'时间：(\d{4}-\d{2}-\d{2})')
        item['time'] = time_matches[0] if time_matches else ''

        source_matches = response.css('.xx_con_tile .t_l::text').re(r'来源：(.*)')
        item['source'] = source_matches[0] if source_matches else ''

        item['content'] = content
        yield item

    def clean_text(self, text):
        """Remove tabs, CRs and LFs then strip; return falsy input unchanged."""
        if text:
            return text.replace('\t', '').replace('\r', '').replace('\n', '').strip()
        return text

