# coding: utf-8
import sys
sys.getdefaultencoding()
import scrapy
import re
'''
A Scrapy spider for the Henan government procurement site (www.hngp.gov.cn).
'''
class HngpSpider(scrapy.Spider):
    """Crawl procurement announcements from the Henan government
    procurement site (www.hngp.gov.cn): walk the paginated search-result
    listing, request each announcement's detail page, and yield one item
    dict per announcement.
    """
    name = 'hngp'  # spider name used by `scrapy crawl`
    allowed_domains = ['www.hngp.gov.cn']  # restrict the crawl to this domain
    # Entry point: first page of the search-result listing (16 items/page).
    start_urls = ['http://www.hngp.gov.cn/henan/ggcx?appCode=H60&channelCode=0101&bz=0&pageSize=16&pageNo=1']
    count = 0  # number of listing pages processed so far (class-level counter)
    MAX_PAGES = 10  # stop following "next page" links after this many pages

    def parse(self, response):
        """Parse one listing page.

        Yields a Request per announcement (detail handled by
        ``parse_detall``) and, while under ``MAX_PAGES``, a Request for
        the next listing page (recursing into ``parse``).
        """
        self.logger.info("processing listing page no: %s", HngpSpider.count)
        # Each <li> under this fixed path is one announcement row.
        li_list = response.xpath("/html/body/div[3]/div[2]/div[2]/div[1]/ul/li")
        for li in li_list:
            hngp_item = {}
            hngp_item["title"] = li.xpath("./a/text()").extract_first()
            pre_title_url = li.xpath("./a/@href").extract_first()
            if not pre_title_url:
                # Guard: extract_first() returns None when the <a> has no
                # href; the original crashed with TypeError here.
                continue
            # startswith handles both http:// and https:// absolute links;
            # the original substring test `"http:" in url` mangled https URLs.
            if pre_title_url.startswith(("http://", "https://")):
                hngp_item["title_url"] = pre_title_url
            else:
                hngp_item["title_url"] = "http://www.hngp.gov.cn" + pre_title_url
            yield scrapy.Request(url=hngp_item['title_url'],
                                 meta={'item': hngp_item},
                                 callback=self.parse_detall,
                                 dont_filter=True)

        # Relative URL of the "next page" link, or None on the last page.
        next_link = response.xpath("//li[@class='nextPage']/a/@href").extract_first()
        # Check BEFORE urljoin: response.urljoin(None) raises TypeError,
        # so the original spider crashed on the final listing page.
        if next_link:
            next_link_url = response.urljoin(next_link)  # build absolute URL
            HngpSpider.count += 1
            if HngpSpider.count < self.MAX_PAGES:
                # Last 8 characters contain the pageNo; strip non-digits.
                # Raw string r"\D" fixes the invalid escape sequence.
                yema = re.sub(r"\D", "", next_link_url[-8:])
                self.logger.info("next page number: %s  url: %s", yema, next_link_url)
                # Recurse into parse() for the next listing page.
                yield scrapy.Request(next_link_url, callback=self.parse,
                                     dont_filter=True)

    def parse_detall(self, response):
        """Parse one announcement detail page and yield the finished item.

        The partially-filled item dict travels in ``response.meta['item']``;
        this adds the publication date and source, then hands the item to
        the pipelines.
        """
        div = response.xpath('//div[@class="TxtRight Padding5"]')
        if not div:
            # Best-effort: log and continue; xpath on an empty SelectorList
            # simply yields None for title_date below.
            self.log("detail page err - - %s" % response.url)
        hngp_item = response.meta['item']
        hngp_item['title_date'] = div.xpath('./span[last()-1]/text()').extract_first()
        hngp_item['come_from'] = "河南政府采购网"
        yield hngp_item  # hand off to the item pipelines
