# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from __future__ import unicode_literals

import os
import codecs
import urllib
from scrapy.exceptions import DropItem
from NovelSpider import SPIDER_PATH
from NovelSpider.items import *

class NovelspiderPipeline(object):
    """Item pipeline persisting crawled novels and chapters to the DB.

    Items are routed by the name of the spider that produced them:
      * ``xs_novel_spider`` -- chapter items, stored as ``ChapterData``
      * ``xs_novel_list``   -- novel list items, stored as ``NovelData``

    NOTE(review): construction also triggers a full scan that refreshes
    each novel's ``crawl_chapter_max`` counter -- confirm this is meant to
    run at pipeline instantiation rather than in ``open_spider``.
    """

    def __init__(self, **kwargs):
        super(NovelspiderPipeline, self).__init__(**kwargs)
        self.__update_chapter_max()

    def __update_chapter_max(self):
        """Recompute the highest crawled chapter number for each novel."""
        # Only novels from this platform are maintained by this pipeline.
        crawled_novels = NovelData.objects.filter(platform=u'闪舞小说')
        for novel in crawled_novels:
            chapter_data = ChapterData.objects.filter(title=novel['title'])
            # `or [0]` keeps max() from raising on novels with no chapters yet.
            chapter_max = max([item.chapter_num for item in chapter_data] or [0])
            novel.update(set__crawl_chapter_max=chapter_max)

    def process_item(self, item, spider):
        """Persist *item* according to the spider that emitted it.

        Returns:
            The (unmodified) item, so later pipelines keep receiving it.

        Raises:
            DropItem: for chapter items with missing or empty text.
        """
        if spider.name == 'xs_novel_spider':
            # .get() avoids a KeyError when the field was never populated;
            # an absent field and an empty text are treated the same: drop.
            if not item.get('chapter_text'):
                raise DropItem("Missing chapter text.")
            # .first() fetches at most one document instead of evaluating
            # the whole queryset just to test for existence.
            # NOTE(review): dedup keys on `chapter` alone, not
            # (title, chapter) -- confirm chapter ids are globally unique.
            if ChapterData.objects.filter(chapter=item['chapter']).first() is None:
                ChapterData(**item).save()
        elif spider.name == 'xs_novel_list':
            if NovelData.objects.filter(title=item['title']).first() is None:
                NovelData(**item).save()

        return item
