# coding: utf-8
"""
Create on 2018/8/23

@author:hexiaosong
"""
from __future__ import unicode_literals

import re
import datetime
import scrapy
from lxml import etree
from bson import json_util
from bs4 import BeautifulSoup
from scrapy.spiders import CrawlSpider
from NovelSpider.settings import CRAWL_NOVEL_NUM
from NovelSpider.items import NovelData, ChapterData
from NovelSpider.spiders import chinese2digits


class NovelSpider(CrawlSpider):
    """Spider that crawls novel chapters from www.35xs.com (闪舞小说).

    Book overview records are read from MongoDB (``NovelData``); each run
    selects at most ``CRAWL_NOVEL_NUM`` books that have not been crawled
    today, fetches each book's table of contents, and yields one item per
    not-yet-stored chapter.
    """

    name = 'xs_novel_spider'

    def __init__(self, **kwargs):
        super(NovelSpider, self).__init__(**kwargs)
        # Base URL used to build absolute chapter links from relative hrefs.
        self.website = 'https://www.35xs.com'
        # Books selected for this run (NovelData documents).
        self.crawl_books = self.__get_books_url()

    @staticmethod
    def jprint(j_data):
        """Pretty-print a dict as JSON, GBK-encoded for the Windows console.

        :param j_data: dict (may contain BSON types, hence json_util).
        """
        print(json_util.dumps(j_data, ensure_ascii=False, indent=4).encode("GBK", "ignore"))

    def _choice_novels(self, crawler_data):
        """Select the novels to crawl in this run.

        A novel is eligible when it has no stored chapters yet, or when none
        of its stored chapters were added today — i.e. each novel is crawled
        at most once per day.

        :param crawler_data: iterable of NovelData documents.
        :return: at most ``CRAWL_NOVEL_NUM`` eligible novels.
        """
        today = datetime.date.today().strftime("%Y-%m-%d")
        need_data = []
        for novel in crawler_data:
            res = ChapterData.objects.filter(title=novel.title)
            # Eligible if never crawled, or not crawled today.  Short-circuit
            # keeps the date list from being built when ``res`` is empty.
            if not res or today not in [item.add_time.strftime("%Y-%m-%d") for item in res]:
                need_data.append(novel)

        # BUGFIX: cap by the number of *eligible* novels, not the full input
        # list (the old min() was only harmless because slicing past the end
        # is safe).
        return need_data[:min(CRAWL_NOVEL_NUM, len(need_data))]

    def __get_books_url(self):
        """Return the list of books to crawl, or [] when nothing is pending."""
        crawler_data = NovelData.objects.filter(platform=u'闪舞小说')
        if crawler_data:
            return self._choice_novels(crawler_data)
        return []

    def start_requests(self):
        """Schedule one request per selected book's table-of-contents page."""
        for book in self.crawl_books:
            yield scrapy.Request(book.links, callback=self._parse_book_page,
                                 meta={'title': book.title})

    def _parse_book_page(self, response):
        """Parse a book's table-of-contents page.

        Yields a chapter request for every numbered chapter ('第N章') that is
        not already stored in MongoDB; prologues ('序章') and unnumbered
        entries are skipped.
        """
        title = response.meta.get('title')
        e_tree = etree.HTML(response.text)
        for ele in e_tree.xpath('//ul[@class="mulu_list"]/li'):
            block_tree = etree.HTML(etree.tostring(ele))
            texts = block_tree.xpath('//a/text()')
            if not texts:
                continue
            chapter = texts[0]
            href = block_tree.xpath('//a/@href')[0]
            if u'章' not in chapter or u'序章' in chapter:
                continue
            tmp = re.search(u'第(.*?)章', chapter)
            chapter_num = 0
            if tmp:
                # Chapter numbers appear as Chinese numerals or digits.
                try:
                    chapter_num = chinese2digits(tmp.group(1))
                except Exception:
                    # BUGFIX: the original bare ``except:`` fell through to an
                    # unguarded ``int()`` that could still raise ValueError and
                    # abort parsing of the whole page; default to 0 instead.
                    try:
                        chapter_num = int(tmp.group(1))
                    except (TypeError, ValueError):
                        chapter_num = 0
            # BUGFIX: build a fresh meta dict per chapter instead of mutating
            # one shared dict across all yielded requests.
            d = {'title': title, 'chapter': chapter, 'chapter_num': chapter_num}
            res = ChapterData.objects.filter(platform=u'闪舞小说', title=d['title'],
                                             chapter_num=d['chapter_num'])
            if not res:
                yield scrapy.Request(self.website + href,
                                     callback=self._parse_detail_page, meta=d)

    def _parse_detail_page(self, response):
        """Parse a chapter detail page and yield the chapter item.

        Scrapy bookkeeping keys are stripped from ``meta`` before it is
        reused as the item dict.
        """
        d = response.meta
        for key in ('depth', 'download_latency', 'download_slot', 'download_timeout'):
            d.pop(key, None)
        soup = BeautifulSoup(response.text, "lxml")
        # BUGFIX: inspect the parsed tree instead of a brittle substring match
        # on 'class="text"', which breaks on single quotes or extra attributes.
        text_div = soup.find('div', {'class': 'text'})
        d['chapter_text'] = text_div.text if text_div else ''

        self.jprint(d)
        yield d
