# -*- coding: utf-8 -*-
import os

from scrapy.utils.project import get_project_settings
from pyhdfs import HdfsClient
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html


# Pipeline that saves each extracted item's text content to a local file
class VBaikeDemoPipeline(object):
    """Write each item's text content to ``IMAGES_STORE/<title>/<title>.txt``.

    Expects items carrying string fields ``title`` and ``content``.
    NOTE(review): ``title`` is used directly as a path component — a title
    containing ``/`` or ``..`` would escape the target directory; confirm
    titles are sanitized upstream.
    """

    def open_spider(self, spider):
        # Resolve project settings once per spider run instead of per item.
        self.base_path = get_project_settings()['IMAGES_STORE']

    def process_item(self, item, spider):
        title = item['title']
        content = item['content']
        # Fall back to a per-item lookup if open_spider was never called,
        # preserving the original behavior for direct callers.
        base_path = getattr(self, 'base_path', None)
        if base_path is None:
            base_path = get_project_settings()['IMAGES_STORE']
        target_dir = os.path.join(base_path, title)
        # Original code assumed the directory already existed (e.g. created
        # by an images pipeline); create it explicitly so this pipeline
        # works standalone.
        os.makedirs(target_dir, exist_ok=True)
        file_name = os.path.join(target_dir, title + '.txt')
        # Explicit UTF-8: the default locale encoding can mangle or reject
        # non-ASCII content on some platforms.
        with open(file_name, 'w', encoding='utf-8') as f:
            f.write(content)
        return item

# Pipeline that appends item data to a file on HDFS
class VBaikeHDFSPipeline(object):
    """Append each item's ``content`` (one line per item) to a file on HDFS."""

    # Connection / target parameters (previously inline magic values).
    HDFS_HOSTS = ['192.168.2.131:50070', '192.168.2.132:50070']
    HDFS_USER = 'hadoop'
    HDFS_PATH = '/test/data.txt'

    def open_spider(self, spider):
        # Build the HDFS client once per spider run; the original opened a
        # fresh connection for every single item.
        self.fs = HdfsClient(hosts=self.HDFS_HOSTS, user_name=self.HDFS_USER)

    def process_item(self, item, spider):
        # Lazy fallback so direct callers that skip open_spider still work.
        fs = getattr(self, 'fs', None)
        if fs is None:
            fs = self.fs = HdfsClient(hosts=self.HDFS_HOSTS,
                                      user_name=self.HDFS_USER)
        data = (item['content'] + '\n').encode('utf-8')
        # Create the file on first write, append thereafter.
        # NOTE(review): exists/create is racy if several writers share this
        # path — acceptable for a single-spider setup; confirm otherwise.
        if not fs.exists(path=self.HDFS_PATH):
            fs.create(path=self.HDFS_PATH, data=data)
        else:
            fs.append(path=self.HDFS_PATH, data=data)
        return item