# coding: utf-8
"""
Create on 2018/8/23

@author:hexiaosong
"""
from __future__ import unicode_literals

import os
import urllib
import scrapy
from bson import json_util
from lxml import etree
from scrapy.spiders import CrawlSpider
from NovelSpider import SPIDER_PATH


# Maps a category display name to the numeric category id that 35xs.com
# uses in its listing URLs (interpolated into NovelSpider.cate_url).
# NOTE: ids are not contiguous — e.g. 12 is absent and 13/14 come after 15.
CATEGORY_URL_DICT = {
    '玄幻奇幻':'1',
    '武侠仙侠':'2',
    '科幻灵异':'6',
    '历史军事':'7',
    '都市言情':'8',
    '现代言情':'9',
    '校园言情':'10',
    '古代言情':'11',
    '网游竞技':'15',
    '经典美文':'13',
    '穿越时空':'14',
}


class NovelSpider(CrawlSpider):
    """Crawl 35xs.com category listings and yield one item per novel.

    Each item carries: category, links (detail URL), title, author, desc,
    cover_link (remote cover URL) and cover (local path of the downloaded
    cover image).
    """

    name = 'xs_novel_list'
    # Format slots: (category id, page number) — ids come from CATEGORY_URL_DICT.
    cate_url = 'https://www.35xs.com/cate/%s/#page=%s'

    def __init__(self, cate_urls=None, **kwargs):
        """
        :param cate_urls: mapping of category display name -> site category id;
            defaults to CATEGORY_URL_DICT. (None-sentinel avoids sharing a
            mutable default between instances.)
        """
        super(NovelSpider, self).__init__(**kwargs)
        self.website = 'https://www.35xs.com'
        self.cate_urls = CATEGORY_URL_DICT if cate_urls is None else cate_urls

    @staticmethod
    def jprint(j_data):
        """
        Pretty-print dict data (debug helper).
        :param j_data: dict-like structure to dump
        :return: None
        """
        # Restored: this line was commented out, leaving the helper a no-op
        # that contradicted its own docstring.
        print(json_util.dumps(j_data, ensure_ascii=False, indent=4))

    def start_requests(self):
        """Generate one listing-page request per (category, page) pair.

        NOTE(review): range(1, 50) covers pages 1..49 — confirm whether
        page 50 was intentionally excluded.
        """
        # `cate_id` instead of `id`: don't shadow the builtin.
        for category, cate_id in self.cate_urls.items():
            for page in range(1, 50):
                page_url = self.cate_url % (cate_id, page)
                yield scrapy.Request(page_url, callback=self.parse_list_page,
                                     meta={"category": category})

    def parse_list_page(self, response):
        """Parse a category listing page; request each novel's detail page.

        :param response: listing-page response (meta carries 'category')
        """
        category = response.meta.get('category')
        e_tree = etree.HTML(response.text)

        for row in e_tree.xpath('//div[@class="tab-content"]//tbody/tr'):
            # Query the row element directly (relative XPath) instead of
            # re-serializing and re-parsing it as the original did.
            hrefs = row.xpath('./td[2]/a/@href')
            titles = row.xpath('./td[2]/a/text()')
            authors = row.xpath('./td[3]/text()')
            if not (hrefs and titles and authors):
                # Malformed row: skip instead of raising IndexError.
                continue
            # Fresh dict per row so concurrently scheduled requests never
            # share (and overwrite) the same meta object.
            item = {
                'category': category,
                'links': self.website + hrefs[0],
                'title': titles[0],
                'author': authors[0],
            }
            yield scrapy.Request(item['links'], callback=self.parse_detail_page,
                                 meta=item)

    def parse_detail_page(self, response):
        """Parse a novel detail page, download its cover, and yield the item.

        :param response: detail-page response (meta carries the partial item)
        """
        # Python 3: `import urllib` alone no longer exposes urlretrieve;
        # the old `urllib.urlretrieve` call raised AttributeError.
        from urllib.request import urlretrieve

        # Copy so we never mutate Scrapy's internal meta dict in place.
        d = dict(response.meta)
        # Strip Scrapy bookkeeping keys that leak into meta.
        for key in ('download_timeout', 'depth', 'download_latency', 'download_slot'):
            d.pop(key, None)

        d['desc'] = response.xpath('//div[@class="booksum"]/p/text()').extract_first()

        cover_src = response.xpath('//div[@class="bookdetail_content"]/img/@src').extract_first()
        if cover_src:
            # Guard against a missing <img>: the original concatenated
            # self.website + None and crashed with TypeError.
            d['cover_link'] = self.website + cover_src
            image_path = '%s/images/%s_%s.jpg' % (SPIDER_PATH, u'闪舞小说', d['title'])
            # Download each cover only once.
            if not os.path.exists(image_path):
                urlretrieve(d['cover_link'], image_path)
            d['cover'] = image_path

        yield d
