import scrapy
import time
from ..items import MyspiderItem
import  logging

logger = logging.getLogger(__name__)
class QidianSpider(scrapy.Spider):
    """Crawl book metadata and chapter contents from qidian.com.

    Starts from a single book's catalog page, extracts each volume
    ("gailan") section and its chapter links, then follows every link
    to scrape the chapter body text into a MyspiderItem.
    """

    name = 'qidian'                        # spider name
    allowed_domains = ['qidian.com']       # allowed domains
    start_urls = [
                  'https://book.qidian.com/info/1014977108/#Catalog']

    def parse(self, response):
        """Parse the book catalog page and schedule one request per chapter.

        :param response: catalog page of a single book
        :return: yields scrapy.Request objects, one per chapter link

        BUG FIX: the original created ONE MyspiderItem and mutated it
        across every loop iteration while passing it by reference in
        Request.meta — so every callback saw the fields of the *last*
        chapter processed.  A fresh item is now built per request.
        """
        gailan = response.xpath('//*[@id="j-catalogWrap"]/div[2]/div')
        # Only proceed if the catalog overview sections exist.
        if not gailan:
            return

        title = response.xpath(
            '/html/body/div/div[6]/div[1]/div[2]/h1/em/text()').extract_first()

        for gl in gailan:
            # Volume/section heading for this catalog block.
            t1 = gl.xpath('.//h3/text()').extract()[1].strip()
            logger.debug("section title --> %s", t1)
            for li in gl.xpath('.//ul[@class="cf"]/li'):
                # Fresh item per chapter: Request.meta holds a reference,
                # so a shared item would be clobbered before the callback runs.
                item = MyspiderItem()
                item['title'] = title
                item['gailan'] = t1
                item['wzbt'] = li.xpath('.//h2/a/text()').extract_first()
                wz_url = "https:" + li.xpath('.//h2/a/@href').extract_first()
                item['zj_url'] = wz_url
                yield scrapy.Request(url=wz_url, callback=self.single_cont,
                                     meta={'item': item})

    def single_cont(self, response):
        """Extract the chapter body text and yield the completed item.

        :param response: a single chapter page
        :return: yields the MyspiderItem carried in response.meta
        """
        item = response.meta['item']
        paragraphs = response.xpath(
            '//div[@class="read-content j_readContent"]/p/text()').extract()
        item['content'] = ''.join(paragraphs).strip()
        logger.info(item)  # log the finished item
        yield item

    def detail_page(self, response):
        """Parse a book's chapter-list page (alternate flow).

        NOTE(review): not referenced by any active callback in this file;
        kept for backward compatibility with external callers.

        :param response: chapter-list page
        :return: yields scrapy.Request objects, one per chapter
        """
        for li in response.xpath('.//div/ul[@class="cf"]/li'):
            # Copy per iteration so each request owns its item
            # (same meta-aliasing pitfall fixed in parse()).
            item = response.meta['item'].copy()
            item['wzbt'] = li.xpath('.//h2/a/text()').extract_first()
            url_2 = "https:" + li.xpath('.//h2/a/@href').extract_first()
            item['url_2'] = url_2
            yield scrapy.Request(url=url_2, callback=self.detail_content,
                                 meta={'item': item})

    def detail_content(self, response):
        """Extract the chapter body text (alternate flow) and yield the item.

        :param response: a single chapter page
        :return: yields the MyspiderItem carried in response.meta
        """
        item = response.meta['item']
        paragraphs = response.xpath(
            '//div[@class="read-content j_readContent"]/p/text()').extract()
        item['content'] = ''.join(paragraphs).strip()
        yield item









