# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import csv

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem


class Scrapy1Pipeline:
    """Item pipeline that writes scraped car records to a CSV file.

    Items missing any required field are discarded via ``DropItem`` so the
    output file only contains complete rows.
    """

    # Item keys written to the CSV, in column order.
    FIELDS = ['Carname', 'Manufacturers', 'Price', 'Main_sales', 'Main_apply']
    # Header row: car model, manufacturer, MSRP, monthly sales, regional registrations.
    HEADERS = ['车型', '厂商', '指导价', '月份销售数据', '地区上牌量']

    def open_spider(self, spider):
        # encoding='utf-8-sig' makes the Chinese headers portable and lets
        # Excel on Windows auto-detect the encoding; newline='' is required
        # by the csv module to avoid blank rows on Windows.
        self.file = open('D:/data/car.csv', 'w', newline='', encoding='utf-8-sig')
        self.writer = csv.writer(self.file)
        self.writer.writerow(self.HEADERS)

    def close_spider(self, spider):
        # Flush and release the output file when the spider finishes.
        self.file.close()

    def process_item(self, item, spider):
        """Validate *item* and append it as one CSV row.

        Raises:
            DropItem: if any required field is absent or empty.
        """
        # .get() avoids a KeyError when a key is absent entirely; both an
        # absent and an empty value cause the item to be dropped.
        values = [item.get(field) for field in self.FIELDS]
        for field, value in zip(self.FIELDS, values):
            if not value:
                raise DropItem(f"Missing {field} field")
        self.writer.writerow(values)
        return item
