import scrapy
from scrapy import Selector, Request
from scrapy.http import HtmlResponse

from spiderdemo.items import DetailItem


class NmdisSpider(scrapy.Spider):
    """Spider that crawls domestic ocean news from nmdis.org.cn.

    Walks the paginated listing at ``/hyxw/gnhyxw/``, follows each
    article link found there, and yields one ``DetailItem`` per article
    with its title, source, release time, and body text.
    """

    name = "nmdis"
    allowed_domains = ["nmdis.org.cn"]
    # Number of listing pages to crawl. Page 1 lives at the bare
    # directory URL; pages 2+ follow the index_<n>.shtml naming scheme.
    max_pages = 1

    def start_requests(self):
        """Yield one request per listing page, up to ``max_pages``."""
        for page in range(self.max_pages):
            print(f"爬取页数：{page + 1}")
            if page < 1:
                # First page has no index suffix.
                yield Request(url='https://www.nmdis.org.cn/hyxw/gnhyxw/')
            else:
                yield Request(url=f'https://www.nmdis.org.cn/hyxw/gnhyxw/index_{page + 1}.shtml')

    def parse(self, response: HtmlResponse):
        """Extract detail-page URLs from a listing page and follow each one.

        The partially-built item (carrying only ``details_url``) is passed
        to ``parse_detail`` via ``cb_kwargs`` for completion.
        """
        sel = Selector(response)
        for url in sel.css("div.title.fl.ellipsis > a::attr(href)"):
            detail_item = DetailItem()
            # Resolve the (possibly relative) href once and reuse it for
            # both the item field and the follow-up request.
            details_url = response.urljoin(url.extract())
            detail_item['details_url'] = details_url
            yield Request(url=details_url, callback=self.parse_detail,
                          cb_kwargs={'item': detail_item})

    def parse_detail(self, response, **kwargs):
        """Fill the item passed via ``cb_kwargs`` with the article's fields."""
        detail_item = kwargs['item']
        sel = Selector(response)
        detail_item['title'] = sel.xpath('//h2[@class="maintit"]/text()').extract_first()
        detail_item['source'] = sel.xpath('//em[@class="fl"]/text()').extract_first()
        detail_item['release_time'] = sel.xpath('//em[@class="fr"]/text()').extract_first()
        # Concatenate every text node inside the article body into one string.
        body_parts = sel.xpath('//div[@class="bodytext"]//text()').extract()
        detail_item['context'] = "".join(body_parts)
        yield detail_item
