# coding: utf-8
"""
Created on 2018/8/1

@author:hexiaosong
"""
from __future__ import unicode_literals

import scrapy
from bson import json_util
from lxml import etree
from scrapy.spiders import CrawlSpider

# Python 2-only compatibility hack: reload(sys) re-exposes
# sys.setdefaultencoding (removed from the module namespace by site.py at
# startup) so the process-wide default encoding can be forced to gb2312,
# preventing UnicodeDecodeError on implicit str<->unicode conversions of the
# Chinese text handled below.
# NOTE(review): this is a well-known anti-pattern and does not exist on
# Python 3 — confirm the deployment really still runs Python 2.
import sys
reload(sys)
sys.setdefaultencoding('gb2312')

# Category display name -> listing path (relative to the site root) crawled by
# NovelSpider.  All entries except the first are currently disabled; uncomment
# a line to crawl that category as well.  The three groups are: male-audience
# categories, female-audience categories, and published (press) books.
CATEGORY_URL_DICT = {
    '男生玄幻':'/category?gender=male&type=hot&major=1',
    # '男生奇幻':'/category?gender=male&type=hot&major=6',
    # '男生武侠':'/category?gender=male&type=hot&major=11',
    # '男生仙侠':'/category?gender=male&type=hot&major=15',
    # '男生都市':'/category?gender=male&type=hot&major=20',
    # '男生职场':'/category?gender=male&type=hot&major=27',
    # '男生历史':'/category?gender=male&type=hot&major=31',
    # '男生军事':'/category?gender=male&type=hot&major=35',
    # '男生游戏':'/category?gender=male&type=hot&major=41',
    # '男生竞技':'/category?gender=male&type=hot&major=46',
    # '男生科幻':'/category?gender=male&type=hot&major=51',
    # '男生灵异':'/category?gender=male&type=hot&major=59',
    # '男生同人':'/category?gender=male&type=hot&major=64',
    # '男生轻小说':'/category?gender=male&type=hot&major=70',

    # '女生古代言情':'/category?gender=female&type=hot&major=71',
    # '女生现代言情':'/category?gender=female&type=hot&major=77',
    # '女生青春校园':'/category?gender=female&type=hot&major=83',
    # '女生纯爱':'/category?gender=female&type=hot&major=84',
    # '女生玄幻奇幻': '/category?gender=female&type=hot&major=87',
    # '女生武侠仙侠': '/category?gender=female&type=hot&major=90',
    # '女生科幻': '/category?gender=female&type=hot&major=93',
    # '女生游戏竞技': '/category?gender=female&type=hot&major=94',
    # '女生悬疑灵异': '/category?gender=female&type=hot&major=95',
    # '女生同人': '/category?gender=female&type=hot&major=98',
    # '女生女尊': '/category?gender=female&type=hot&major=104',

    # '出版物出版小说': '/category?gender=press&type=&major=106',
    # '出版物传记名著': '/category?gender=press&type=&major=107',
    # '出版物成功励志': '/category?gender=press&type=&major=108',
    # '出版物人文社科': '/category?gender=press&type=&major=109',
    # '出版物经管理财': '/category?gender=press&type=&major=110',
    # '出版物生活时尚': '/category?gender=press&type=&major=111',
    # '出版物育儿健康': '/category?gender=press&type=&major=112',
    # '出版物青春言情': '/category?gender=press&type=&major=113',
    # '出版物外文原版': '/category?gender=press&type=&major=114',
    # '出版物政治军事': '/category?gender=press&type=&major=115',

}


class NovelSpider(CrawlSpider):
    """Crawl zhuishushenqi.com category listing pages.

    For every category path in ``cate_urls`` the spider requests listing
    pages 1..49 and yields one dict per book with the keys:
    ``category``, ``links``, ``author``, ``title``, ``desc``,
    ``popularity``, ``cover_link``.
    """

    name = 'novel_list'

    def __init__(self, cate_urls=None, **kwargs):
        """
        :param cate_urls: mapping of category name -> listing path relative
            to the site root; defaults to ``CATEGORY_URL_DICT``.
        """
        super(NovelSpider, self).__init__(**kwargs)
        self.website = 'http://www.zhuishushenqi.com'
        # Copy the mapping so that mutating self.cate_urls can never alter
        # the shared module-level constant (the original default aliased it).
        self.cate_urls = dict(CATEGORY_URL_DICT if cate_urls is None else cate_urls)

    @staticmethod
    def jprint(j_data):
        """Pretty-print a dict as indented, non-ASCII-preserving JSON.

        Debugging aid only; has no effect on the items yielded.

        :param j_data: dict (may contain BSON types handled by json_util)
        :return: None
        """
        print(json_util.dumps(j_data, ensure_ascii=False, indent=4))

    @staticmethod
    def _first(nodes, default=''):
        """Return the first item of an XPath result list, or *default* when empty."""
        return nodes[0] if nodes else default

    def start_requests(self):
        """Yield one request per (category, page) pair, pages 1..49 each.

        The category name travels in ``meta['category']`` so the parse
        callback can attach it to every item from that page.
        """
        for cate, path in self.cate_urls.items():
            base_url = self.website + path
            for page in range(1, 50):
                page_url = base_url + '&minor=&page=%s' % page
                yield scrapy.Request(page_url, callback=self.parse_list_page,
                                     meta={"category": cate})

    def parse_list_page(self, response):
        """Parse one listing page and yield a dict per book found on it.

        Pages whose body contains the site's "no books found" message are
        skipped entirely.

        :param response: scrapy Response for a category listing page
        """
        if u'没有找到相关书籍，可以尝试直接搜索哦' in response.text:
            return
        category = response.meta.get('category')
        e_tree = etree.HTML(response.text)
        # Query relative to each <a class="book"> element directly.  The
        # previous implementation serialized every element with
        # etree.tostring and re-parsed it with etree.HTML just to run
        # absolute XPaths — slow, and it raised IndexError both for anchors
        # without class="book" and for books missing any expected child.
        for book in e_tree.xpath('//div[@class="books-list"]/a[@class="book"]'):
            d = {
                'category': category,
                'links': book.get('href', ''),
                'author': self._first(book.xpath('.//p[@class="author"]/span/text()')),
                'title': self._first(book.xpath('.//h4[@class="name"]/span/text()')),
                'desc': self._first(book.xpath('.//p[@class="desc"]/text()')),
                'popularity': self._first(book.xpath('.//p[@class="popularity"]/span/text()')),
                'cover_link': self._first(book.xpath('.//img[@class="cover"]/@src')),
            }
            self.jprint(d)
            yield d