# -*- coding: utf-8 -*-
from time import sleep
import re
import scrapy
from ..items import NovelItem


class NovelSpiderSpider(scrapy.Spider):
    """Crawl dashenxiaoshuo.com starting from the full-catalogue page.

    For every page visited, ``parse``:
      * yields a ``NovelItem`` with the chapter list and/or chapter content
        found on the page (if any), and
      * follows both absolute in-domain links and relative chapter links,
        recursing into ``parse`` for each.
    """

    name = 'novel_spider'
    # allowed_domains = ['www.dashenxiaoshuo.com/html/28/28606/']
    start_urls = ['http://www.dashenxiaoshuo.com/xiaoshuodaquan/']

    def parse(self, response):
        """Extract novel data from *response* and schedule follow-up requests.

        Yields ``NovelItem`` instances and ``scrapy.Request`` objects.
        """
        items = NovelItem()
        # Chapter titles (catalogue pages) and chapter body (detail pages).
        novel_list = response.xpath('//dd//a//text()').extract()
        novel_content = response.xpath('//div[@id="content"]').extract()
        if novel_list:
            items['novel_list'] = novel_list
        if novel_content:
            items['novel_content'] = novel_content
        # Only yield when the page actually produced data; an empty item
        # would just pollute the pipeline.
        if novel_list or novel_content:
            yield items

        # Collect every link on the page, drop empty hrefs, and keep only
        # those that stay on the target domain, then recurse into them.
        # NOTE: the original called time.sleep(2) here when no links were
        # found — that blocks the whole Twisted reactor and was removed;
        # use the DOWNLOAD_DELAY setting for throttling instead.
        novel_urls = response.css('a::attr(href)').extract()
        in_domain = [u for u in novel_urls if u and 'dashenxiaoshuo' in u]
        for novel_url in in_domain:
            yield scrapy.Request(url=novel_url, callback=self.parse)

        # Chapter links are relative paths; prefix the site root.
        # BUG FIX: the original built the URLs in a loop but rebound a
        # single variable, then iterated over the *characters* of the last
        # URL — issuing one bogus request per character. Yield one request
        # per chapter URL instead.
        for href in response.xpath('//dd//a/@href').extract():
            detail_url = 'http://www.dashenxiaoshuo.com/{}'.format(href)
            yield scrapy.Request(url=detail_url, callback=self.parse)




