import scrapy
from ..items import RzyItem


class GovSpider(scrapy.Spider):
    """Crawl gov.cn policy-search results for the keyword "碳" (carbon).

    Requests the first 11 result pages (p=0..10), follows each result's
    link, and yields one ``RzyItem`` per policy document containing its
    title, URL, publication date and first indented body paragraph.
    """

    name = "gov"
    allowed_domains = ["gov.cn"]

    # Search-results URL template; {page} is the zero-based page index.
    _SEARCH_URL = (
        "http://sousuo.gov.cn/s.htm?t=zhengce&q=%E7%A2%B3&timetype=&mintime="
        "&maxtime=&sort=&sortType=&searchfield=&pcodeJiguan=&childtype="
        "&subchildtype=&tsbq=&pubtimeyear=&puborg=&pcodeYear=&pcodeNum="
        "&filetype=&p={page}&n=&inpro=&sug_t="
    )

    def start_requests(self):
        """Generate all 11 result-page requests exactly once.

        The original implementation yielded the full p=0..10 range from
        every ``parse()`` call, leaning on the dupe-filter to discard the
        ~11 redundant requests produced per page (and re-requesting the
        start URL). Emitting them here keeps the crawl identical while
        removing the redundancy.
        """
        for page in range(11):
            yield scrapy.Request(
                url=self._SEARCH_URL.format(page=page),
                callback=self.parse,
            )

    def parse(self, response):
        """Extract one item per search result and follow its detail link.

        :param response: a search-results page (``_SEARCH_URL``).
        :yields: ``scrapy.Request`` for each result's detail page, with
            the partially-filled item passed along in ``meta['item']``.
        """
        # Iterate the result list entries.
        for result in response.xpath("//ul[contains(@class,'middle_result_con')]/li"):
            items = RzyItem()
            items['file_name'] = result.xpath("./a/text()").get()  # document title
            href = result.xpath("./a/@href").get()  # detail-page URL
            if not href:
                # A <li> without a link would make Request(url=None)
                # raise TypeError — skip such entries instead.
                continue
            # Resolve possibly-relative hrefs against the page URL
            # (absolute hrefs pass through unchanged).
            items['file_url'] = response.urljoin(href)
            yield scrapy.Request(
                url=items['file_url'],
                callback=self.detail_parse,
                meta={'item': items},
            )

    def detail_parse(self, response):
        """Fill in the detail-page fields and yield the completed item.

        :param response: a policy-document detail page.
        :yields: the ``RzyItem`` started in :meth:`parse`, now carrying
            the publication date and the first body paragraph.
        """
        items = response.meta['item']
        # First text node of the dateline block.
        items['file_time'] = response.xpath(
            "//div[contains(@class,'pages-date')]/text()[1]"
        ).get()
        # Second paragraph styled with a 2em text-indent — the first
        # real body paragraph on gov.cn policy pages.
        items['file_page1'] = response.xpath(
            "//p[contains(@style,'2em')][2]/text()"
        ).get()
        yield items
