# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


# Data processing and storage (item pipeline)
import os
from scrapyBook.entity.Book import *
class ScrapybookPipeline:
    """Buffers scraped book sections and, when the spider closes, writes them
    to ``book.txt`` ordered by section number.

    ``process_item`` converts each item into a ``Book`` entity and appends it
    to ``pipLists``; ``close_spider`` sorts the buffer and persists it.
    """

    # Kept for backward compatibility with code that reads the class attribute;
    # __init__ shadows it with a per-instance list so state is not shared
    # across pipeline instances (the original shared one mutable class list).
    pipLists = []

    def __init__(self):
        # Fresh buffer per pipeline instance.
        self.pipLists = []

    def process_item(self, item, spider):
        """Copy the parsed fields of *item* into a Book and buffer it.

        Returns the item unchanged so any downstream pipeline still sees it.
        """
        books = Book()
        print("=======写入list============")
        # Transfer the parsed fields from the scraped item to the entity.
        books.section = item['section']
        books.sectionCount = item['sectionCount']
        books.mainBody = item['mainBody']
        self.pipLists.append(books)
        return item

    def close_spider(self, spider):
        """Sort the buffered sections and write them all to book.txt."""
        print("========爬虫结束时保存文件========")
        # operator is stdlib and always importable; the original try/except
        # fallback sorted by ``x.count`` (a non-existent Book attribute) — that
        # dead, buggy branch is removed. attrgetter is also faster than a lambda.
        import operator
        self.pipLists.sort(key=operator.attrgetter("sectionCount"), reverse=False)
        print('***********对文章list进行排序完成***********************')
        # Mode 'w' truncates any previous contents (or creates the file),
        # replacing the old exists-check + truncate(0) + per-item re-open
        # in append mode: one open, n writes.
        with open('book.txt', 'w', encoding='UTF-8') as f:
            for obj in self.pipLists:
                print("list正文=========================%s" % obj.section, obj.sectionCount)
                f.write(obj.section + '\n' + obj.mainBody)
