# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import os


class GuanwangPipeline(object):
    """Scrapy item pipeline that saves crawled page assets under ``<cwd>/ym/``.

    Routing by ``item['type']``:
      - ``'html'`` -> ``ym/index.html``
      - ``'css'``  -> ``ym/_static/css/<item['css_name']>``
      - ``'js'``   -> ``ym/_static/js/<item['js_name']>``
    """

    def process_item(self, item, spider):
        """Write one crawled asset to disk and pass the item along.

        Args:
            item: mapping carrying ``'type'`` ('html'/'css'/'js'), the payload
                bytes under ``'wy'`` (decoded as UTF-8 by default ``decode()``),
                and ``'css_name'``/``'js_name'`` for the respective types.
            spider: the spider that produced the item (unused here).

        Returns:
            The unmodified item, as required by the Scrapy pipeline contract
            so later pipelines in ITEM_PIPELINES still receive it.
        """
        base_dir = os.path.join(os.getcwd(), 'ym')
        css_dir = os.path.join(base_dir, '_static', 'css')
        js_dir = os.path.join(base_dir, '_static', 'js')
        # exist_ok=True replaces the race-prone exists()+makedirs() pair.
        for directory in (base_dir, css_dir, js_dir):
            os.makedirs(directory, exist_ok=True)

        payload = item.get('wy')
        kind = item.get('type')
        if kind == 'html':
            target = os.path.join(base_dir, 'index.html')
        elif kind == 'css':
            target = os.path.join(css_dir, item.get('css_name'))
        elif kind == 'js':
            target = os.path.join(js_dir, item.get('js_name'))
        else:
            target = None  # unknown type: nothing to write (matches original)

        if target is not None:
            with open(target, 'w', encoding='utf-8') as f:
                f.write(payload.decode())

        # Original dropped the item; pipelines must return it (or raise
        # DropItem) so downstream pipelines are not fed None.
        return item



