# -*- coding: utf-8 -*-
import scrapy
from cnblog.items import CnblogItem
from scrapy import signals
from selenium import webdriver
from lxml import html
from bs4 import BeautifulSoup

class CnblogSpiderSpider(scrapy.Spider):
    """Crawl the paginated statistics listing on stjj.guizhou.gov.cn
    (Guizhou Provincial Bureau of Statistics data-query section).

    Pagination scheme on this site: page 1 is ``index.html`` and page N
    (N >= 2) is ``index_<N-1>.html``.  Each listing page yields one
    :class:`CnblogItem` holding the lists of entry titles and links.
    """

    name = "cnblog_spider_sjcx"
    allowed_domains = ["stjj.guizhou.gov.cn"]
    url = 'https://stjj.guizhou.gov.cn/tjsj_35719/sjcx_35720/index.html'
    offset = 1          # 1-based counter of the page currently being parsed
    max_pages = 80      # last listing page to crawl (was an inline magic number)
    start_urls = [url]

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Build the spider, attach a Selenium Chrome driver, and hook the
        ``spider_closed`` signal so the browser is shut down with the spider.

        NOTE(review): the driver is created here but never used in ``parse``;
        presumably a downloader middleware elsewhere uses ``spider.driver`` —
        confirm before removing.
        """
        spider = super(CnblogSpiderSpider, cls).from_crawler(crawler, *args, **kwargs)
        spider.driver = webdriver.Chrome()
        crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
        return spider

    def spider_closed(self, spider):
        """Signal handler: log the shutdown and terminate the browser."""
        spider.logger.info('Spider closed: %s', spider.name)
        # quit() ends the whole WebDriver session (browser + chromedriver);
        # the original close() only closed the current window and could
        # leave the driver process running.
        spider.driver.quit()

    def parse(self, response):
        """Extract all entry titles/links from one listing page, then
        schedule the next page until ``max_pages`` is reached.
        """
        item = CnblogItem()

        # Both XPaths target the anchor tags inside the listing spans; the
        # item fields hold the full lists for this page.
        item['title'] = response.xpath('//span[@class="lbx"]/a[@target="_blank"]/text()').extract()
        item['link'] = response.xpath('//span[@class="lbx"]/a[@target="_blank"]/@href').extract()

        yield item

        print("第{0}页爬取完成".format(self.offset))
        if self.offset < self.max_pages:
            # Page N+1 lives at index_<N>.html, so build the URL from the
            # CURRENT offset before incrementing.  (The original code
            # incremented first, which skipped index_1.html entirely, and
            # it yielded a duplicate request once the limit was reached.)
            next_url = self.url[0:len(self.url) - 5] + '_' + str(self.offset) + self.url[-5:]
            self.offset += 1
            print(next_url)
            yield scrapy.Request(url=next_url, callback=self.parse)

