# -*- coding: utf-8 -*-
import re

import scrapy

from famousbook.chapter_items import ChapterItem
from famousbook.items import FamousbookItem


class SpecialWorldSpiderSpider(scrapy.Spider):
    """Spider for the "world classics" (世界名著) listing on t.icesmall.cn.

    Yields one ``FamousbookItem`` per book found on the listing page, then
    schedules each book's chapter-directory URL for ``parseChapter``.
    """

    # Spider name used by `scrapy crawl`.
    name = 'special_world_spider'
    # Restrict the crawl to this host.
    allowed_domains = ['t.icesmall.cn']
    # Entry point: the tag page listing the world-classics books.
    start_urls = ['http://t.icesmall.cn/waptagSpecial/special_world/%E4%B8%96%E7%95%8C%E5%90%8D%E8%91%97']

    # Maximum number of books taken from the listing page.
    MAX_BOOKS = 10

    def parse(self, response):
        """Parse the book listing page.

        Yields a ``FamousbookItem`` for each of the first ``MAX_BOOKS``
        books, then a ``scrapy.Request`` per distinct chapter-directory
        URL, handled by :meth:`parseChapter`.
        """
        target_div = response.xpath("//div[@class='dirShow']")[0]
        book_list = target_div.xpath(".//ul/li")
        chapter_url_list = set()
        # BUG FIX: the original incremented a counter and broke on
        # `count == 10` *before* processing, so only 9 books were handled.
        # Slicing takes the first MAX_BOOKS entries as intended.
        for i_item in book_list[:self.MAX_BOOKS]:
            famousbook_item = FamousbookItem()
            famousbook_item['book_type'] = 'special_world'
            url = i_item.xpath(".//a/@href").extract_first()
            if not url:
                # Malformed listing entry: skip it rather than crash on a
                # None URL (the original called url.rfind unconditionally).
                self.logger.warning('book entry without a link: %r',
                                    i_item.get())
                continue
            chapter_url_list.add(url)
            # URL shape: http://t.icesmall.cn/wapbookDir/1/<book_id>/0.html
            # -> the book id is the second-to-last path segment.
            famousbook_item['book_id'] = url.split('/')[-2]

            title = i_item.xpath(".//a/text()").extract_first() or ''
            self.logger.debug('book title: %s', title)
            # Title shape is assumed to be "<author>:《<book name>》" —
            # TODO confirm against the live page.
            famousbook_item['author'] = title.split(':')[0]
            match = re.search(r'《(.*?)》', title)
            # Fall back to the raw title when the 《…》 markers are missing
            # (the original raised IndexError on re.findall(...)[0]).
            famousbook_item['book_name'] = match.group(1) if match else title
            famousbook_item['introduce'] = ''
            # Hand the item to the pipelines.
            yield famousbook_item

        # Schedule each distinct book's chapter directory for parsing.
        for chapter_url in chapter_url_list:
            yield scrapy.Request(chapter_url, callback=self.parseChapter)

    def parseChapter(self, response):
        """Parse a book's chapter-directory page.

        NOTE(review): still a stub — it yields hard-coded placeholder
        values instead of extracting real chapter data from ``response``;
        the real extraction (see the commented-out draft in history)
        remains to be implemented.
        """
        chapter_item = ChapterItem()
        chapter_item['book_id'] = 1
        chapter_item['title'] = 'test title'
        chapter_item['content'] = 'test content'
        # Hand the item to the pipelines.
        yield chapter_item
