# -*- coding: utf-8 -*-
import scrapy

from douban.items import Item


class DoubanSpiderSpider(scrapy.Spider):
    """Crawl Baidu Baike lemma pages.

    For each page, follow every link found in the summary and basic-info
    sections, and yield one Item carrying the lemma title, its summary
    text and the flattened attribute table.
    """

    name = 'mingyan_spider'
    # NOTE(review): this seed URL is a Wikipedia file page, but every
    # selector in parse() targets baike.baidu.com page structure
    # (.lemmaWgt-lemmaTitle-title, .lemma-summary, .basic-info) — confirm
    # the intended start URL is a https://baike.baidu.com/item/... page.
    start_urls = ['https://zh.wikipedia.org/wiki/File:Diversity_of_plants_image_version_5.png']
    # Base used to absolutize the relative hrefs extracted in parse().
    domain = 'https://baike.baidu.com'

    def parse(self, response):
        """Parse one lemma page.

        Yields a scrapy.Request (callback=self.parse) for every link found
        in the summary and basic-info blocks, then a single populated Item.
        """
        content = response.css('div.main-content')
        title = content.css('.lemmaWgt-lemmaTitle-title h1::text').extract_first()
        descs = content.css('.lemma-summary')
        dl = content.css('.basic-info dl')

        # Collect relative links from both the summary and the attribute
        # table, and schedule them for crawling against the baike domain.
        urls = descs.xpath('.//a/@href').extract()
        urls += dl.xpath('.//a/@href').extract()
        for url in urls:
            yield scrapy.Request(url=self.domain + url, callback=self.parse)

        # Flatten the attribute table into one string, skipping footnote
        # markers (<sup>); '\xa0' (non-breaking space) is layout noise.
        attrs = "".join(
            "".join(node.xpath("(*[(name(.)!='sup')])//text()").extract()).replace('\xa0', '')
            for node in dl
        )
        summary = "".join(descs.xpath('./div[@class="para"]//text()').extract())

        item = Item()
        item['title'] = title
        item['summary'] = summary
        item['attrs'] = attrs
        yield item
