# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import codecs
from scrapy.conf import settings
from scrapy import log
import os
import re

from novel.items import BiQuGeCategoryItem
from novel.items import BiQuGeNovelItem
from novel.items import BiQuGeNovelChapterItem


class JsondPipelines(object):
    """Scrapy pipeline that dumps each ``BiQuGeNovelChapterItem`` to its own
    JSON file under ``DATA_URL/<source>/<categoryName>/<novelName>/``.

    Non-chapter items (category / novel items) pass through unchanged.
    """

    @staticmethod
    def filterFileName(fileName):
        """Return *fileName* with characters illegal in Windows file names
        (/ \\ : * ? " < > |) replaced by underscores."""
        rstr = r"[\/\\\:\*\?\"\<\>\|]"  # / \ : * ? " < > |
        return re.sub(rstr, "_", fileName)

    def __init__(self):
        # Root directory for the dumped chapter files.
        # NOTE(review): ``scrapy.conf`` is deprecated/removed in modern
        # Scrapy; prefer ``crawler.settings`` via ``from_crawler``.
        self.fileurl = settings.get('DATA_URL')

    def process_item(self, item, spider):
        """Write chapter items to ``<novel dir>/<sort>_<name>.json``.

        Returns the item unchanged so downstream pipelines still see it.
        """
        if isinstance(item, BiQuGeNovelChapterItem):
            # Strip stray ",\r\n" sequences left over from page extraction.
            item['content'] = item['content'].replace(',\r\n', '')

            # Directory holding every chapter of this novel.
            menu_dir = os.path.join(
                self.fileurl,
                self.filterFileName(item["source"]),
                self.filterFileName(item["categoryName"]),
                self.filterFileName(item["novelName"]))

            # Create the (possibly multi-level) directory on first use.
            if not os.path.exists(menu_dir):
                os.makedirs(menu_dir)

            filename = self.filterFileName(
                str(item["sort"]) + '_' + item["name"]) + '.json'

            # BUG FIX: the original reassigned ``self.filename`` on every
            # item without closing the previous handle, leaking one file
            # descriptor per chapter and flushing only the last file in
            # close_spider. A context manager closes (and flushes) each
            # file as soon as it is written.
            jsontext = json.dumps(dict(item), ensure_ascii=False)
            with open(os.path.join(menu_dir, filename), "wb") as fp:
                fp.write(jsontext.encode("utf-8"))

        return item

    def close_spider(self, spider):
        # Files are now closed per item; guard against the AttributeError
        # the original raised when no chapter item was ever processed.
        fp = getattr(self, 'filename', None)
        if fp is not None and not fp.closed:
            fp.close()
