# -*- coding: utf-8 -*-
import scrapy
from yaocaifang.items import CnblogItem
from scrapy import Selector

class YaoCaiFangSpider(scrapy.Spider):
    """Spider for the 药材坊 (herbal-medicine) listing pages on baicao99.com.

    Crawls the paginated listing URLs of the form
    ``https://www.baicao99.com/yaocai/list_0_0_0_0_0_<page>.html``
    sequentially from page 1 up to ``MAX_PAGE``.
    """

    name = "yaocaifang_spider"
    allowed_domains = ["baicao99.com"]
    url = 'https://www.baicao99.com/yaocai/list_0_0_0_0_0_'
    offset = 1          # current page number (sequential crawl, so class-level state is OK)
    subfix = ".html"    # NOTE: misspelling of "suffix" kept — it is a public class attribute
    MAX_PAGE = 10       # last page to crawl (was a magic number inline)
    start_urls = [url + str(offset) + subfix]

    def parse(self, response):
        """Parse one listing page, yield an item, and schedule the next page.

        :param response: the downloaded listing page.
        :yields: one (currently empty) ``CnblogItem`` per page, then a
            ``scrapy.Request`` for the next page while below ``MAX_PAGE``.
        """
        item = CnblogItem()

        # Hrefs of the anchors inside the #write container, in one XPath.
        # BUG FIX: the original did `"href1:" + href` where `href` is a
        # SelectorList — str + SelectorList raises TypeError at runtime.
        # It also printed the same stale value twice. Extract real strings
        # with .getall() and print each one.
        for href in response.xpath('//*[@id="write"]/a/@href').getall():
            print("href:" + href)

        yield item

        print("第{0}页爬取完成".format(self.offset))  # "page N finished crawling"
        # BUG FIX: the original yielded the next Request unconditionally, so
        # after reaching the last page it kept re-requesting the same URL
        # (only Scrapy's duplicate filter stopped it). Only schedule the next
        # page while we are below the cap.
        if self.offset < self.MAX_PAGE:
            self.offset += 1
            next_url = self.url + str(self.offset) + self.subfix  # build next page URL
            print(next_url)
            yield scrapy.Request(url=next_url, callback=self.parse)

