# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import logging
import os
from os.path import basename, dirname, join
from urllib.parse import urlparse

import pymysql
import scrapy
from fake_useragent import UserAgent
from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem
from scrapy.pipelines.files import FilesPipeline


# Download the dictionary files referenced by scraped items.
class SogouWordFilePipeline(FilesPipeline):
    """Download the Sogou dictionary file referenced by each scraped item.

    Expects items carrying:
      - ``dictUrl``:  URL of the dictionary file to download.
      - ``dictName``: desired storage file name (used verbatim as the path).
    """

    def get_media_requests(self, item, info):
        """Schedule the download request for this item's dictionary URL.

        The item is stashed in ``request.meta`` so ``file_path`` can
        recover the target file name on older Scrapy versions that do
        not pass the item through directly.
        """
        yield scrapy.Request(item['dictUrl'], meta={'item': item})

    def item_completed(self, results, item, info):
        """Called when all downloads for the item finish.

        Drops the item if no file was downloaded successfully; otherwise
        returns the item unchanged for downstream pipelines.

        Raises:
            DropItem: when every download attempt for the item failed.
        """
        file_paths = [x['path'] for ok, x in results if ok]
        if not file_paths:
            # Use the logging framework (not print) so messages respect
            # Scrapy's log level and output configuration.
            logging.getLogger(__name__).warning('下载失败')
            raise DropItem("Item contains no files")
        # Lazy %-formatting keeps the message cheap when the level is off.
        logging.getLogger(__name__).info('%s下载成功', item['dictName'])
        return item

    def file_path(self, request, response=None, info=None, *, item=None):
        """Return the storage path for the downloaded file: the dict's name.

        Modern Scrapy passes ``item`` as a keyword argument; fall back to
        the copy carried in ``request.meta`` for older versions. Note the
        name is used verbatim — if ``dictName`` ever contains path
        separators it will create subdirectories (or fail) in the store.
        """
        if item is None:
            item = request.meta['item']
        return item['dictName']