# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


import os

class MeizhewujiangPipeline(object):
    """No-op item pipeline: passes every scraped item through unchanged.

    Keep it registered in ITEM_PIPELINES only if you intend to add
    processing later; otherwise it can be removed from the settings.
    """

    def process_item(self, item, spider):
        """Return the item untouched so later pipelines can process it."""
        return item


class CoolscrapyPipeline(object):
    """Append each scraped item to ``luoxia.txt`` in the current working directory.

    Must be enabled in settings.py, e.g.::

        ITEM_PIPELINES = {'coolscrapy.pipelines.CoolscrapyPipeline': 300}
    """

    def process_item(self, item, spider):
        """Write the item's ``title`` and ``content`` lines to the output file.

        The file is opened in append mode so successive items accumulate.
        Returns the item unchanged so further pipelines can run.
        Raises KeyError if the item lacks 'title' or 'content'.
        """
        # Output file lives in the current working directory.
        filename = os.path.join(os.getcwd(), 'luoxia.txt')
        data = dict(item)
        title = data['title']
        contents = data['content']
        # The with-statement closes the file; the original also called
        # f.close() inside the block, which was redundant.
        with open(filename, 'a', encoding='utf-8') as f:
            f.write(title + '\n')
            f.writelines(line + '\n' for line in contents)
        return item