# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import csv
import os

from itemadapter import ItemAdapter


class GuaziPipeline(object):
    """Item pipeline that appends each scraped listing to a CSV file.

    Rows are written to ``<cwd>/download/瓜子二手车.csv`` with the columns
    (title, old_money, new_money, year). The item is returned unchanged so
    any later pipelines still receive it.
    """

    def process_item(self, item, spider):
        """Append one item's fields to the CSV file and pass the item on.

        :param item: scraped item (dict-like; fields read via ``.get`` so
            missing keys become empty cells rather than raising)
        :param spider: the spider that produced the item (logged only)
        :returns: the same ``item``, unmodified
        """
        print("-----我是管道，接收到了数据--->", item, "----数据属于爬虫:--->", spider)
        # Build the target directory path portably; exist_ok avoids the
        # check-then-create race of the old `if not exists: makedirs`.
        download_path = os.path.join(os.getcwd(), 'download')
        os.makedirs(download_path, exist_ok=True)
        csv_path = os.path.join(download_path, '瓜子二手车.csv')
        # newline='' is required by the csv module (prevents blank rows on
        # Windows); `with` guarantees the handle is closed — the original
        # leaked one open file per item.
        with open(csv_path, 'a', encoding='utf-8', newline='') as g:
            info = (item.get('title'), item.get('old_money'),
                    item.get('new_money'), item.get('year'))
            csv.writer(g).writerow(info)
        print("保存信息到CSV....ok")
        return item
