import scrapy
from poetry.items import *
import time

class ZhonghuadiancangSpider(scrapy.Spider):
    """Crawl e-books and their chapters from www.zhonghuadiancang.com.

    Walks several category index pages, follows pagination, emits one
    BookItem per book page, and schedules one request per chapter page,
    each of which yields a ChapterItem.
    """

    name = 'zhonghuadiancang'
    allowed_domains = ['www.zhonghuadiancang.com']
    # Category index pages to start from (Buddhist canon, Confucian
    # philosophy, history/biography, poetry & opera, literature & art,
    # metaphysics, miscellaneous studies, astronomy & geography,
    # anthologies, foreign classics, sinology knowledge).
    start_urls = [
        'https://www.zhonghuadiancang.com/foxuebaodian/',
        'https://www.zhonghuadiancang.com/rulizhexue/',
        'https://www.zhonghuadiancang.com/lishizhuanji/',
        'https://www.zhonghuadiancang.com/shicixiqu/',
        'https://www.zhonghuadiancang.com/wenxueyishu/',
        'https://www.zhonghuadiancang.com/xuanxuewushu/',
        'https://www.zhonghuadiancang.com/xueshuzaji/',
        'https://www.zhonghuadiancang.com/tianwendili/',
        'https://www.zhonghuadiancang.com/leishuwenji/',
        'https://www.zhonghuadiancang.com/waiguomingzhu/',
        'https://www.zhonghuadiancang.com/guoxuezhishi/',
    ]

    def parse(self, response):
        """Parse a category listing page: follow pagination and book links."""
        print("获取列表：", response.request.url)
        # Follow the "next page" (下一页) link, if present.
        next_href = response.xpath('//*[@title="下一页"]/@href').get()
        if next_href:
            yield scrapy.Request(response.urljoin(next_href), self.parse)
        # Queue every book detail page found on the current listing page.
        for href in response.xpath("//*[@class='col-md-4 col-sm-8 col-xs-8']/a/@href").getall():
            yield scrapy.Request(response.urljoin(href), self.get_book)

    def get_book(self, response):
        """Parse a book page: yield its BookItem and request each chapter.

        Chapter requests carry cb_kwargs so get_chapter_item can record the
        chapter's 1-based position (sort), its owning section URL
        (parent_url, 0 for top-level chapters), and the book page URL.
        """
        item = BookItem()
        item['table_name'] = "ebooks"
        item['title'] = response.xpath("//*[@class='active']/a/text()").get()
        item['author'] = response.xpath("//h1/small/a/text()").get()
        item['summary'] = response.xpath("//*[@class='m-summary']/text()").get()
        item['years'] = response.xpath("//*[@class='alert']/a[1]/text()").get()
        item['category'] = response.xpath("//*[@class='breadcrumb']/li[2]/a/text()").get()
        item['tags'] = ','.join(response.xpath("//*[@class='alert']/a/text()").getall())
        # Provenance: when and from where this record was grabbed.
        item['grab_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        item['grab_url'] = response.request.url
        print("\t获取电子书：", item['title'])

        sort = 0
        # Query once instead of evaluating the same XPath twice.
        chapter_hrefs = response.xpath("//*[@id='booklist']/li/a/@href").getall()
        if chapter_hrefs:
            # Flat chapter list: every chapter is top-level (parent_url=0).
            for href in chapter_hrefs:
                sort += 1
                # BUG FIX: join the href against the response URL — the
                # sectioned branch below already did this, but this branch
                # requested the raw (possibly relative) href.
                yield scrapy.Request(
                    response.urljoin(href),
                    self.get_chapter_item,
                    cb_kwargs=dict(parent_url=0, sort=sort, book_url=response.request.url),
                )
        else:
            # Sectioned layout: each <h3> is a section heading whose first
            # following <ul> holds that section's chapter links.
            for section in response.xpath("//*/h3"):
                section_url = response.urljoin(section.xpath("a/@href").get())
                sort += 1
                yield scrapy.Request(
                    section_url,
                    self.get_chapter_item,
                    cb_kwargs=dict(parent_url=0, sort=sort, book_url=response.request.url),
                )
                for href in section.xpath("following-sibling::ul[1]/li/a/@href").getall():
                    sort += 1
                    # BUG FIX: these hrefs may be relative too — urljoin them
                    # like the section link above.
                    yield scrapy.Request(
                        response.urljoin(href),
                        self.get_chapter_item,
                        cb_kwargs=dict(parent_url=section_url, sort=sort, book_url=response.request.url),
                    )

        yield item

    def get_chapter_item(self, response, parent_url, sort, book_url):
        """Parse one chapter page into a ChapterItem.

        parent_url: URL of the owning section page, or 0 for a top-level
        chapter. sort: 1-based position within the book. book_url: URL of
        the book page this chapter belongs to.
        """
        item = ChapterItem()
        item['table_name'] = "chapters"
        # Prefer the <h1> heading; fall back to the active breadcrumb entry.
        item['title'] = response.xpath("//h1/text()").get()
        if not item['title']:
            item['title'] = response.xpath("//*[@class='active']/a/text()").get()

        item['content'] = response.xpath("//*[@id='content']").get()
        item['book_url'] = book_url
        item['sort'] = sort
        item['parent_url'] = parent_url
        # Provenance: when and from where this record was grabbed.
        item['grab_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        item['grab_url'] = response.request.url
        print("\t\t获取章节：", item['title'])
        yield item
