# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import csv
import re

from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem

class LianjiaHomePipeline:
    """Drop incomplete listings and normalize numeric fields.

    Items whose ``elevator`` field holds the site's "no data" placeholder
    are discarded; ``area`` and ``unit_price`` are stripped down to their
    first numeric value (e.g. "89.5平米" -> "89.5").
    """

    # Matches an integer or decimal number, e.g. "89" or "89.5".
    # Compiled once at class level instead of re-scanning the pattern
    # string on every item.
    _NUM_RE = re.compile(r"\d+\.?\d*")

    def process_item(self, item, spider):
        # Discard records the site marked as having no elevator data —
        # they are incomplete listings.  (The original message mentioned
        # 朝向/direction, which did not match the field being checked.)
        if item["elevator"] == "暂无数据":
            raise DropItem("电梯信息无数据，抛弃：{}".format(item))

        # Keep only the leading number of each measured field.  If a field
        # contains no number at all, treat the record as incomplete rather
        # than crashing with an IndexError.
        for field in ("area", "unit_price"):
            match = self._NUM_RE.search(item[field])
            if match is None:
                raise DropItem("字段 {} 无数值，抛弃：{}".format(field, item))
            item[field] = match.group()

        return item

class CSVPipeline:
    """Append each scraped listing as one row of lianjia.csv.

    Rows are written with the stdlib ``csv`` module instead of hand-built
    comma joins, so field values that themselves contain commas or quotes
    (e.g. direction "南,北") are properly escaped and the file stays a
    valid CSV.
    """

    file = None
    writer = None

    # Column order for both the header row and every data row.
    FIELDS = ("name", "type", "area", "direction", "fitment",
              "elevator", "total_price", "unit_price")

    def open_spider(self, spider):  # runs once when the spider starts
        # newline="" is required when handing a text file to csv.writer;
        # utf-8-sig adds a BOM so Excel auto-detects the encoding.
        self.file = open("lianjia.csv", "a", encoding="utf-8-sig", newline="")
        self.writer = csv.writer(self.file)
        self.writer.writerow(self.FIELDS)

    def process_item(self, item, spider):
        # One properly-escaped CSV row per item, columns in FIELDS order.
        self.writer.writerow([item[field] for field in self.FIELDS])
        return item

    def close_spider(self, spider):  # runs once when the spider finishes
        # Flush buffered rows and release the file handle.
        self.file.close()