# -*- coding: utf-8 -*-

import scrapy
from scrapy.pipelines.files import FilesPipeline
import requests
import os
from urllib.parse import urlparse
from os.path import basename
from scrapy.exceptions import DropItem

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

class MyDownloadPipeline(object):
    """Download each item's first file URL to a Windows-style local directory tree.

    Files land under ``.\\Download\\<type>\\<platform>\\<category>\\``.
    A 404 response is logged to ``dowload_err404.txt`` (filename kept as-is,
    typo included, so existing logs keep accumulating in the same place);
    any other response is saved to disk and logged to a per-directory
    ``dowload_success.txt``.
    """

    def process_item(self, item, spider):
        """Fetch ``item["file_urls"][0]`` and persist it, then pass the item on.

        NOTE(review): only a 404 is treated as failure — other error codes
        (e.g. 403/500) still write the response body to disk. Left unchanged
        to preserve existing behavior; confirm whether that is intended.
        """
        if len(item["file_urls"]) != 0:
            # Renamed from `dir` to avoid shadowing the builtin.
            save_dir = ".\\Download\\" + item["type"] + "\\" + item["platform"][0] + "\\" + item["category"] + "\\"
            file_name = save_dir + item["file_urls"][0].split("/")[-1]

            r = requests.get(item["file_urls"][0])
            if r.status_code == 404:
                # Record the broken URL and title; `with` closes the file,
                # so no explicit close() is needed.
                with open("dowload_err404.txt", "a+") as info:
                    info.write(item["file_urls"][0])
                    info.write(",")
                    info.write(item["title"][0])
                    info.write("\n")
            else:
                # exist_ok avoids the exists()/makedirs() race of the original.
                os.makedirs(save_dir, exist_ok=True)
                # Skip re-downloading a file that is already on disk.
                if not os.path.exists(file_name):
                    with open(file_name, "wb") as f:
                        f.write(r.content)
                with open(save_dir + "dowload_success.txt", "a+") as info:
                    info.write(item["file_urls"][0])
                    info.write(",")
                    info.write(item["title"][0])
                    info.write("\n")
        return item


class FileDownloadPipeline(FilesPipeline):
    """FilesPipeline that stores each file under its original URL basename."""

    def file_path(self, request, response=None, info=None, *, item=None):
        """Return the storage path for *request*: just the URL's basename.

        The keyword-only ``item`` parameter matches the modern Scrapy
        ``FilesPipeline.file_path`` signature (Scrapy >= 2.4 passes it and
        warns on overrides that lack it); it is accepted but unused here.
        Backward-compatible: positional callers are unaffected.
        """
        down_file_name = basename(urlparse(request.url).path)
        return down_file_name

class SpiderPipeline(object):
    """No-op pipeline: forwards every item unchanged to the next stage."""

    def process_item(self, item, spider):
        """Return *item* as-is; *spider* is unused."""
        return item