# coding: utf-8
"""
Create on 2018/7/31

@author:hexiaosong
"""
from __future__ import unicode_literals

import os
import codecs
import random
import datetime
import scrapy
from bson import json_util
from bs4 import BeautifulSoup
from scrapy.spiders import CrawlSpider
from NovelSpider.settings import CRAWL_NOVEL_NUM
from NovelSpider.items import NovelData, ChapterData
import sys
# Python 2 only: force the interpreter-wide default encoding to gb2312 so
# implicit str<->unicode conversions of the crawled GBK text do not raise
# UnicodeDecodeError.
# NOTE(review): sys.setdefaultencoding is a well-known hack that can mask
# real encoding bugs.  On Python 3, reload() is not a builtin and the
# default encoding is always utf-8, so the hack must be skipped there —
# the original unconditional call crashes with NameError on Python 3.
if sys.version_info[0] == 2:
    reload(sys)  # re-exposes setdefaultencoding, which site.py deletes
    sys.setdefaultencoding('gb2312')

class NovelSpider(CrawlSpider):
    """Spider that crawls novel chapters from zhuishushenqi.com.

    On construction it selects a random subset of unfinished novels from
    MongoDB (``NovelData``) and schedules their chapter-index pages; each
    not-yet-stored chapter page is then fetched and yielded as an item.
    """

    name = 'novel_spider'

    def __init__(self, **kwargs):
        super(NovelSpider, self).__init__(**kwargs)
        # Site root; the paths stored in MongoDB are relative to it.
        self.website = 'http://www.zhuishushenqi.com'
        self.crawl_books = self.__get_books_url()

    @staticmethod
    def jprint(j_data):
        """Pretty-print a dict/BSON document for debugging.

        :param j_data: any structure ``bson.json_util`` can serialize
        """
        print(json_util.dumps(j_data, ensure_ascii=False, indent=4))

    def _choice_novels(self, crawler_data, CRAWL_NOVEL_NUM):
        """Restrict each novel to at most one crawl per calendar day.

        NOTE(review): the second parameter deliberately shadows the
        module-level ``CRAWL_NOVEL_NUM`` constant; the name is kept for
        backward compatibility with existing call sites.

        :param crawler_data: iterable of ``NovelData`` documents
        :param CRAWL_NOVEL_NUM: maximum number of novels to return
        :return: novels with no chapter crawled today, truncated to the limit
        """
        need_data = []
        today = datetime.date.today().strftime("%Y-%m-%d")
        for novel in crawler_data:
            res = ChapterData.objects.filter(title=novel.title)
            # Keep the novel if it has no stored chapters at all, or if
            # none of its chapters were added today.
            if not res:
                need_data.append(novel)
            elif today not in [item.add_time.strftime("%Y-%m-%d") for item in res]:
                need_data.append(novel)
        return need_data[:CRAWL_NOVEL_NUM]

    def __get_books_url(self):
        """Pick the novels to crawl in this run.

        :return: a random sample of at most ``CRAWL_NOVEL_NUM`` unfinished
            novels, or an empty list when every novel is finished.
        """
        crawler_data = NovelData.objects.filter(finished=False)
        if not crawler_data:
            return []
        # Clamp the sample size: random.sample raises ValueError when the
        # population is smaller than the requested amount.
        sample_size = min(CRAWL_NOVEL_NUM, len(crawler_data))
        # Alternative once-per-day selection policy:
        #   self._choice_novels(crawler_data, CRAWL_NOVEL_NUM)
        return random.sample(crawler_data, sample_size)

    def start_requests(self):
        """Schedule one request per selected novel's index page."""
        # An empty selection naturally yields nothing.
        for book in self.crawl_books:
            yield scrapy.Request(self.website + book.links,
                                 callback=self._parse_index_page)

    def _parse_index_page(self, response):
        """Parse a novel's chapter index and schedule unseen chapters.

        :param response: the chapter-index page of one novel
        """
        title = response.xpath(
            '//ul[@class="page-route"]/li[last()]/text()').extract_first()
        hrefs = response.xpath(
            '//ul[contains(@class,"chapter-list")]/li/a/@href').extract()
        # Chapters are listed newest-first on the page; crawl oldest-first.
        for href in reversed([self.website + h for h in hrefs]):
            # ".../12.html" -> 12.  Use split, not strip('.html'): str.strip
            # removes *characters* from both ends, not a suffix.
            page_num = int(href.split('/')[-1].split('.')[0])
            res = ChapterData.objects.filter(
                platform=u'追书神器', title=title, chapter_num=page_num)
            if not res:
                # Fresh meta dict per request so every callback sees its
                # own chapter_num instead of a shared, mutated dict.
                yield scrapy.Request(
                    href, callback=self._parse_detail_page,
                    meta={'title': title, 'chapter_num': page_num})

    def _parse_detail_page(self, response):
        """Parse one chapter page and yield the chapter data dict.

        Paid chapters (detected by the site's marker string) are skipped.

        :param response: one chapter-detail page
        """
        d = response.meta
        # Drop scrapy's bookkeeping keys so only our fields are stored.
        for key in ('depth', 'download_latency', 'download_slot',
                    'download_timeout'):
            d.pop(key, None)
        d['chapter'] = response.xpath(
            '//ul[@class="page-route"]/li[last()]/text()').extract_first()
        content = response.text
        if u'本章为收费章节，请访问手机端或下载App继续阅读' in content:
            return
        soup = BeautifulSoup(content, "lxml")
        if 'inner-text' in content:
            d['chapter_text'] = soup.find(
                "div", {"class": "inner-text"}).text.replace('\n', '').strip()
        else:
            d['chapter_text'] = ''

        self.jprint(d)
        yield d
