# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import os
from io import BytesIO
from os.path import join, basename, dirname
from urllib.parse import urlparse
from xmly.settings import FILES_STORE
from scrapy.pipelines.files import FilesPipeline
# class XmlyPipeline(object):
#     def process_item(self, item, spider):
#         return item
from scrapy.http import Request
from scrapy.utils.misc import md5sum


class XmlyFilesPipeline(FilesPipeline):
    """FilesPipeline subclass that stores downloaded audio as
    "<albumId>/<title>.mp3" instead of Scrapy's default SHA1-hash paths.
    """

    ### Overridable Interface
    def get_media_requests(self, item, info):
        """Yield one download Request per URL in the item's files-URLs field.

        The item is stashed in ``request.meta`` so ``file_path()`` can build
        the destination path from it on older Scrapy versions that do not
        pass ``item`` through to ``file_path``.
        """
        return [
            Request(url, meta={"item": item})
            for url in item.get(self.files_urls_field, [])
        ]

    def file_downloaded(self, response, request, info, *, item=None):
        """Persist the downloaded body and return its md5 checksum.

        Fixes over the previous version:
        - Dropped the redundant manual ``open(...).write(...)``:
          ``self.store.persist_file()`` already writes the file under
          FILES_STORE at the path returned by ``file_path()``, so the body
          was being written to the same location twice.
        - The manual write also crashed with FileNotFoundError whenever the
          ``albumId`` sub-directory did not exist yet (``open`` does not
          create directories; ``persist_file`` does).
        - Accepts the keyword-only ``item`` that Scrapy >= 2.4 passes.
        """
        path = self.file_path(request, response=response, info=info, item=item)
        buf = BytesIO(response.body)
        checksum = md5sum(buf)
        buf.seek(0)  # md5sum consumed the buffer; rewind before persisting
        self.store.persist_file(path, buf, info)
        return checksum

    def file_path(self, request, response=None, info=None, *, item=None):
        """Return the relative storage path "<albumId>/<title>.mp3".

        Prefers the ``item`` Scrapy passes directly (>= 2.4) and falls back
        to the one stashed in ``request.meta`` by ``get_media_requests()``.
        NOTE(review): a title containing "/" or other filesystem-illegal
        characters would corrupt the path — consider sanitizing upstream.
        """
        if item is None:
            item = request.meta["item"]
        return "%(albumId)s/%(title)s.mp3" % item