# -*- coding: utf-8 -*-
import re
import scrapy

from selenium import webdriver
from scrapy.http import Request
from urllib import parse
from scrapy.loader import ItemLoader
from ArticleSpider.items import SzhrssArticleItem, ArticleItemLoader

from ArticleSpider.utils.common import get_md5

class RensheSpider(scrapy.Spider):
    """Spider for the Shenzhen HR & Social Security notice board (tzgg list pages).

    Crawls the announcement list, follows each article link to parse the
    detail page, and follows the "next page" link recursively.
    """

    name = 'renshe'
    # Bare domain only: OffsiteMiddleware matches hostnames, not URLs.
    # A scheme-prefixed value would cause every follow-up request to be
    # dropped as off-site.
    allowed_domains = ['www.szhrss.gov.cn']
    start_urls = ['http://www.szhrss.gov.cn/tzgg/']

    def parse(self, response):
        """Parse one list page.

        1. Extract every article URL on the list page and schedule it for
           download, handing the result to ``parse_detail``.
        2. Extract the next-page URL and schedule it back into ``parse``.
        """
        # Each <li> holds one announcement: a date <span> and a link <a>.
        post_nodes = response.css(".conRight_text .conRight_text_ul1 li")
        for post_node in post_nodes:
            create_date = post_node.css("span::text").extract_first("")
            post_url = post_node.css("a::attr(href)").extract_first("")
            # The list shows only relative hrefs; join against the page URL.
            # create_date lives on the list page, so pass it via meta.
            yield Request(url=parse.urljoin(response.url, post_url),
                          dont_filter=True,
                          meta={"create_date": create_date},
                          callback=self.parse_detail)

        # The next-page anchor is present in the server-rendered HTML, so a
        # plain CSS extraction suffices — no browser automation needed.
        # NOTE(review): the nth-child index assumes a fixed pager layout;
        # confirm against the live page markup.
        next_url = response.css(".page > a:nth-child(6)::attr(href)").extract_first("")
        if next_url:
            yield Request(url=parse.urljoin(response.url, next_url),
                          callback=self.parse)

    def parse_detail(self, response):
        """Parse an article detail page and yield a populated item.

        The creation date is carried over from the list page via
        ``response.meta``.
        """
        create_date = response.meta.get("create_date", "")
        # Populate the item through an ItemLoader so field processors
        # declared on ArticleItemLoader are applied uniformly.
        item_loader = ArticleItemLoader(item=SzhrssArticleItem(), response=response)
        item_loader.add_css("title", ".conRight_text2 h4::text")
        item_loader.add_value("create_date", create_date)
        article_item = item_loader.load_item()
        yield article_item

