# -*- coding: utf-8 -*-
import scrapy
import re

class ZzggzySpider(scrapy.Spider):
    """Spider for the Zhengzhou Public Resource Trading Center (郑州公共资源交易中心).

    Crawls the construction-project announcement listing, yields one dict per
    announcement (title, absolute URL, date, source label), and follows the
    "next page" pager link for up to MAX_PAGES pages.
    """
    name = 'zzggzy'  # Zhengzhou Public Resource Trading Center
    allowed_domains = ['zzsggzy.com']
    # start_urls = ['http://www.hngp.gov.cn/']
    start_urls = ['http://zzsggzy.com/jsgc/004001/subpage.html']  # entry URL handed to the scheduler
    count = 0  # class-level counter: number of listing pages processed so far
    MAX_PAGES = 30  # stop following pagination after this many pages

    # Compiled once instead of per item. NOTE: the '*' inside the character
    # class is a literal asterisk (the original pattern '[\r\n\t\s*]' stripped
    # '*' characters too) — preserved for output compatibility.
    _WS_RE = re.compile(r'[\r\n\t\s*]')

    @classmethod
    def _clean(cls, text):
        """Strip whitespace (and literal '*') from *text*; returns '' for None.

        extract_first() may return None on a malformed row; the original code
        would then crash inside re.sub with a TypeError.
        """
        return cls._WS_RE.sub('', text) if text else ''

    def parse(self, response):
        """Default callback: parse one search-result page.

        Yields one item dict per announcement row, then a Request for the
        next page while fewer than MAX_PAGES pages have been processed.
        """
        print("处理页码:", ZzggzySpider.count)
        # Each <li> is one announcement row.
        li_list = response.xpath("/html/body/div[3]/div/div[2]/div/div[2]/ul/li")

        for li in li_list:
            zzggzy_item = {
                "title": self._clean(li.xpath("./div[@class='ewb-com-block l']/a/text()").extract_first()),
                # 'or ""' keeps urljoin from raising TypeError on a missing href
                "title_url": response.urljoin(li.xpath("./div[@class='ewb-com-block l']/a/@href").extract_first() or ''),
                "title_date": self._clean(li.xpath("./span/text()").extract_first()),
                "come_from": "郑州公共资源交易中心",
            }
            print(zzggzy_item)
            yield zzggzy_item

        # "Next page" link: the 6th-from-last entry in the pager list.
        url = response.xpath("//div[@class='ewb-page']//ul/li[last()-5]/a/@href").extract_first()
        # BUG FIX: the original called response.urljoin(url) *before* checking
        # for None, which raises TypeError on the last page; and since urljoin
        # always returns a non-empty string, the old truthiness guard on the
        # joined URL could never stop the crawl. Guard on the raw href instead.
        if url:
            ZzggzySpider.count += 1
            if ZzggzySpider.count < self.MAX_PAGES:
                next_link_url = response.urljoin(url)
                print(ZzggzySpider.count)
                print("接下来爬取页数是：" + url + "    链接是：" + next_link_url)
                yield scrapy.Request(next_link_url, callback=self.parse, dont_filter=True)