# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import csv
import os

import redis


# class DoubanMoviePipeline(object):
#     def process_item(self, item, spider):
#     # TODO: 将 item 结果以 JSON 形式保存到 Redis 数据库的 list 结构中
#         data = {'url':item['url'],'name':item['name'],'summary':item['summary'],'score':item['score']}
#         self.redis.lpush('douban_movie:items',data)
#         return item
#
#
#     def open_spider(self, spider):
#         # 连接数据库
#         self.redis = redis.StrictRedis(host='localhost', port=6379, db=0)


class JobsPipeline(object):
    """Scrapy item pipeline that appends job items to a local CSV file.

    Each processed item contributes one row of (image_name, image_urls)
    to ``shanghai_jobs.csv`` next to this module. Items with an empty
    ``image_name`` are passed through without being written.
    """

    def __init__(self):
        # CSV file lives next to this module; it is created on open,
        # no need to pre-create it.
        store_file = os.path.join(os.path.dirname(__file__), 'shanghai_jobs.csv')
        # csv.writer on Python 3 requires a TEXT-mode file opened with
        # newline='' (the writer supplies its own line endings).  utf-8
        # encoding here replaces the old per-field .encode() call.
        self.file = open(store_file, 'w', newline='', encoding='utf-8')
        self.writer = csv.writer(self.file)

    def process_item(self, item, spider):
        """Write the item as a CSV row if its image_name is non-empty.

        Returns the item unchanged so later pipelines still receive it.
        """
        # Skip rows with an empty/missing name value; write the string
        # directly — encoding to bytes would emit a b'...' repr in the CSV.
        if item['image_name']:
            self.writer.writerow((item['image_name'], item['image_urls']))
        return item

    def close_spider(self, spider):
        # Flush and close the CSV file when the spider finishes.
        # (The original method had no body, leaking the file handle.)
        self.file.close()