# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import csv
import os
import re


class Youxin002Pipeline(object):
    """Persist scraped Youxin (优信) used-car items to the local disk.

    Two item types are handled, discriminated by ``item['type']``:

    * ``'info'`` -- appended as one row to a per-series CSV file;
    * ``'img'``  -- raw image bytes written into an ``image/`` sub-folder.

    Resulting layout (relative to the current working directory)::

        优信二手车/<brand>/<series>.csv
        优信二手车/<brand>/image/<img_name>
    """

    # Column order for the per-car CSV rows; keys must match the item keys.
    CSV_FIELDS = ['标题', '图片链接', '出厂年份', '地点', '总价格', '首付', '补贴']

    def process_item(self, item, spider):
        """Write *item* to disk and return it for any downstream pipelines.

        :param item: dict-like scraped item; must contain key ``'标题'`` and
            a ``'type'`` of ``'info'`` or ``'img'``.
        :param spider: the spider that produced the item (unused).
        :returns: the (possibly mutated) item -- Scrapy requires pipelines
            to return the item so later pipelines keep running.
        """
        title = item.get('标题')
        # First space-delimited token of the title (the brand); fall back to
        # the whole title when it contains no space (the original indexed
        # findall()[0] unconditionally and crashed on such titles).
        brand_hits = re.findall(r"(.+?) ", title)
        brand = brand_hits[0] if brand_hits else title
        # Second space-delimited token (the series / model name).
        series_hits = re.findall(r" (.+?) ", title)
        series = series_hits[0] if series_hits else brand

        car_dir = os.path.join(os.getcwd(), '优信二手车', brand)
        image_dir = os.path.join(car_dir, 'image')
        # exist_ok avoids the check-then-create race of the old
        # os.path.exists() + os.makedirs() pair; parents are created too.
        os.makedirs(image_dir, exist_ok=True)

        if item.get('type') == 'info':
            csv_path = os.path.join(car_dir, '%s.csv' % series)
            # Emit a header row only when the file is new or empty, so
            # repeated appends do not duplicate it.
            need_header = (not os.path.exists(csv_path)
                           or os.path.getsize(csv_path) == 0)
            # newline='' is required by the csv module; without it Windows
            # output gets an extra blank line after every row.
            with open(csv_path, 'a', encoding='utf-8', newline='') as f:
                writer = csv.DictWriter(f, self.CSV_FIELDS)
                if need_header:
                    writer.writeheader()
                # Drop the discriminator so the row matches CSV_FIELDS.
                item.pop('type')
                writer.writerows([item])

        elif item.get('type') == 'img':
            with open(os.path.join(image_dir, item.get('img_name')), 'wb') as f:
                f.write(item.get("img_bytes"))

        return item

