# coding: utf-8
"""
Created on 2018/8/23

@author:hexiaosong
"""
from __future__ import unicode_literals

import os
import urllib
import scrapy
from bson import json_util
from lxml import etree
from scrapy.spiders import CrawlSpider
from NovelSpider import SPIDER_PATH


# Mapping of novel category display name (Chinese) -> URL path code.
# The code is interpolated into NovelSpider.cate_url to build the
# listing-page URLs on ckxsw.com.
CATEGORY_URL_DICT = {
    '玄幻小说':'xuanhuan',  # fantasy
    '武侠修真':'xiuzhen',  # wuxia / cultivation
    '都市言情':'dushi',  # urban romance
    '历史军事':'lishi',  # history / military
    '网游竞技':'wangyou',  # online gaming / e-sports
    '科幻小说':'kehuan',  # science fiction
    '恐怖灵异':'kongbu',  # horror / supernatural
    '其他小说':'qita',  # other
}


class NovelSpider(CrawlSpider):
    """Crawl category listing pages of ckxsw.com and yield novel metadata.

    For every category in ``cate_urls`` the spider requests listing pages
    1..49 and extracts title, author, description, detail link and cover
    image for each novel block found on the page.
    """

    name = 'ck_novel_list'
    # Listing-page URL template, filled with (category code, page number).
    cate_url = 'https://www.ckxsw.com/%s_%s.html'

    def __init__(self, cate_urls=CATEGORY_URL_DICT, **kwargs):
        """
        :param cate_urls: mapping of category display name -> URL code.
            Defaults to the module-level CATEGORY_URL_DICT; the dict is
            only read, never mutated, so sharing the default is safe.
        """
        super(NovelSpider, self).__init__(**kwargs)
        self.website = 'https://www.ckxsw.com'
        self.cate_urls = cate_urls

    @staticmethod
    def jprint(j_data):
        """Pretty-print a dict as indented JSON (debugging helper).

        :param j_data: any json_util-serializable object.
        """
        # BUG FIX: the print statement was commented out, which made this
        # helper a silent no-op for every caller.
        print(json_util.dumps(j_data, ensure_ascii=False, indent=4))

    def start_requests(self):
        """Yield one request per (category, page) pair for pages 1-49."""
        for cate, code in self.cate_urls.items():
            for page in range(1, 50):
                page_url = self.cate_url % (code, page)
                yield scrapy.Request(page_url, callback=self.parse_list_page,
                                     meta={"category": cate})

    def parse_list_page(self, response):
        """Parse one category listing page and yield one dict per novel.

        Each item carries category, title, author, desc, links (detail-page
        href), cover_link (remote image URL) and cover (local image path).
        The cover image is downloaded to ``SPIDER_PATH/images`` the first
        time it is seen.
        """
        # Python 2/3 compatible urlretrieve: the module-level
        # `urllib.urlretrieve` only exists on Python 2.
        try:
            from urllib.request import urlretrieve  # Python 3
        except ImportError:
            from urllib import urlretrieve  # Python 2

        category = response.meta.get('category')
        e_tree = etree.HTML(response.text)

        eles = e_tree.xpath('//div[@class="fl_right"]/div[@class="fl_nr"]')
        for ele in eles:
            # BUG FIX: build a fresh dict per novel. The original created a
            # single dict before the loop and yielded it repeatedly, so every
            # yielded item aliased the same object and each iteration
            # overwrote the data of the previously yielded items.
            d = {'category': category}
            try:
                block_tree = etree.HTML(etree.tostring(ele))

                d['title'] = block_tree.xpath('//p[@class="p1"]/a/text()')[0]
                d['author'] = block_tree.xpath('//p[@class="p4"]/a/text()')[0]
                d['desc'] = block_tree.xpath('//p[@class="p5"]/a/text() | //p[@class="p5"]/text()')[0]
                d['links'] = block_tree.xpath('//p[@class="p1"]/a/@href')[0]

                d['cover_link'] = block_tree.xpath('//img/@src')[0]
                image_path = '%s/images/%s_%s.jpg' % (SPIDER_PATH, u'创客小说', d['title'])
                if not os.path.exists(image_path):
                    urlretrieve(d['cover_link'], image_path)
                d['cover'] = image_path
            except Exception:
                # Best-effort scraping (as in the original): a malformed
                # block or a failed image download skips this item. Narrowed
                # from a bare `except:` so KeyboardInterrupt/SystemExit
                # still propagate.
                continue

            yield d