'''
Description: 
Author: DZH
Date: 2020-11-20 20:04:54
LastEditors: DZH
LastEditTime: 2020-11-21 20:41:32
'''
# -*- coding: utf-8 -*-
import scrapy
from scrapy_1.items import Scrapy1Item
from urllib.parse import urljoin
from scrapy.http import Request


class BasicSpider(scrapy.Spider):
    """Crawl the article listing on byb.cn, emitting one item per article.

    ``parse`` scrapes each listing entry's title/time, then schedules a
    request for the article's detail page; ``parse_detail`` completes the
    item with the article body and yields it. Pagination is followed until
    ``COUNT_MAX`` listing pages have been processed.
    """

    name = 'basic'
    # allowed_domains = ['cnblogs.com']
    start_urls = [
        'http://www.byb.cn/doclist_0-1.aspx'
    ]
    COUNT_MAX = 1  # maximum number of listing pages to crawl
    count = 0      # listing pages processed so far

    def parse(self, response):
        """Scrape one listing page and follow detail/pagination links.

        Yields a ``Request`` per article (carrying a partially filled
        ``Scrapy1Item`` in ``meta``) and, while under ``COUNT_MAX``, a
        ``Request`` per "next page" link (handled by ``parse`` again,
        the default callback).
        """
        for entry in response.xpath('//div[@class="item"]'):
            item = Scrapy1Item()
            item['title'] = entry.xpath('h2/a/text()').extract()
            item['time'] = entry.xpath('time/text()').extract()
            detail_href = entry.xpath('h2/a/@href').extract_first()
            # Listing hrefs may be relative; resolve against the page URL.
            detail_url = urljoin(response.url, detail_href)
            yield Request(detail_url,
                          callback=self.parse_detail,
                          meta={'item': item})

        self.count += 1
        if self.count < self.COUNT_MAX:
            # "下一页" is the site's "next page" link text.
            next_hrefs = response.xpath('//a[contains(text(),"下一页")]/@href')
            for href in next_hrefs.extract():
                yield Request(urljoin(response.url, href))

    def parse_detail(self, response):
        """Finish the item started in ``parse`` with the article body."""
        item = response.meta['item']
        item['cont'] = response.xpath('//div[@id="contexthtml"]').extract()
        yield item