# coding: utf-8
"""
Created on 2018/8/22

@author:hexiaosong
"""
from __future__ import unicode_literals

import os
import scrapy
import urllib
from bson import json_util
from lxml import etree
from NovelSpider import SPIDER_PATH
from scrapy.spiders import CrawlSpider


class NovelSpider(CrawlSpider):
    """Spider for tqshuyuan.com's full novel index.

    Walks the ``/all.html`` listing, follows each novel's link, and yields
    one dict per novel: title, link, category, author, description, and a
    locally-saved cover image path.
    """

    name = 'tq_novel_list'
    start_urls = ['http://www.tqshuyuan.com/all.html']

    @staticmethod
    def jprint(j_data):
        """Pretty-print a dict as indented JSON (debugging helper).

        :param j_data: any json_util-serializable object
        """
        print(json_util.dumps(j_data, ensure_ascii=False, indent=4))

    def parse(self, response):
        """Parse the index page: one ``<div class="listlie">`` per category.

        Yields a :class:`scrapy.Request` per novel link, forwarding title,
        link and category through ``meta`` to :meth:`parse_list_page`.
        """
        e_tree = etree.HTML(response.text)
        for ele in e_tree.xpath('//div[@id="main"]/div[@class="listlie"]'):
            # Re-parse the category block so the //-prefixed XPaths below
            # are scoped to this block only.
            block_tree = etree.HTML(etree.tostring(ele))
            headings = block_tree.xpath('//h2/text()')
            # Guard: a block without an <h2> used to raise IndexError here.
            category = headings[0] if headings else ''
            if u'小说列表' in category:
                category = category.replace(u'小说列表', '')
            novels_title = block_tree.xpath('//li/a/text()')
            novels_href = block_tree.xpath('//li/a/@href')
            for href, title in zip(novels_href, novels_title):
                yield scrapy.Request(
                    href,
                    callback=self.parse_list_page,
                    meta={'title': title, 'links': href, 'category': category},
                )

    def parse_list_page(self, response):
        """Parse one novel's detail page and yield the item dict.

        Downloads the cover image to ``<SPIDER_PATH>/images/`` on first
        sight; ``d['cover']`` holds the local path whenever the page has a
        cover link (previously it was only set on the first download, so
        re-crawls yielded items with the key missing).
        """
        # Py2/Py3-compatible urlretrieve: bare ``urllib.urlretrieve`` is
        # Python 2 only and raises AttributeError on Python 3.
        try:
            from urllib.request import urlretrieve
        except ImportError:  # Python 2 fallback
            from urllib import urlretrieve

        d = {
            'title': response.meta.get('title'),
            'links': response.meta.get('links'),
            'category': response.meta.get('category'),
            'author': response.xpath('//p[@class="p_author"]/a/text()').extract_first(),
            'desc': response.xpath('//*[@id="bookintro"]/p/text()').extract_first(),
            'cover_link': response.xpath('//div[@id="bookimg"]/img/@src').extract_first(),
        }

        if d['cover_link']:  # page may have no cover image at all
            image_dir = os.path.join(SPIDER_PATH, 'images')
            if not os.path.isdir(image_dir):
                os.makedirs(image_dir)  # avoid IOError on a fresh checkout
            image_path = '%s/%s_%s.jpg' % (image_dir, u'天晴书院', d['title'])
            if not os.path.exists(image_path):
                urlretrieve(d['cover_link'], image_path)
            # Always record the local path, not only on first download.
            d['cover'] = image_path

        yield d

