# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy import Request
from scrapy.pipelines.files import FilesPipeline
from urllib.parse import urlparse
import os, re
from ncep.items import CfsItem, GfsItem, RadarItem

class NcepPipeline(object):
    """Default no-op pipeline: forwards every item unchanged."""

    def process_item(self, item, spider):
        # Nothing to transform here; pass the item along to the next
        # enabled pipeline stage.
        return item

class CfsPlipeline(FilesPipeline):
    """Download CFSv2 data files, storing them as cfsv2/<dir>/<file>.

    NOTE(review): the class name keeps the original "Plipeline" typo on
    purpose — ITEM_PIPELINES settings reference it by dotted path, so
    renaming would break the project configuration.
    """

    def get_media_requests(self, item, info):
        # Only CFS items are handled here; other item types are left for
        # their own dedicated pipelines.
        if isinstance(item, CfsItem):
            yield Request(item['url'])

    def file_path(self, request, response=None, info=None):
        """Map a download URL to its relative storage path.

        Uses the 4th-from-last URL segment as the directory name and the
        last segment as the file name, i.e.
        .../<dir>/x/y/<file> -> cfsv2/<dir>/<file>.
        """
        # Split once instead of twice, and drop the old commented-out
        # debug prints.
        parts = request.url.split("/")
        return 'cfsv2/' + parts[-4] + '/' + parts[-1]

class GfsPlipeline(FilesPipeline):
    """Download GFS data files, mirroring the URL path after "/prod/"
    under gfs/.

    NOTE(review): the class name keeps the original "Plipeline" typo
    because ITEM_PIPELINES settings reference it by dotted path.
    """

    def get_media_requests(self, item, info):
        # Only GFS items are handled by this pipeline.
        if isinstance(item, GfsItem):
            yield Request(item['url'])

    def file_path(self, request, response=None, info=None):
        """Return 'gfs/' + everything after the "/prod/" marker.

        If "/prod/" is absent, the whole URL is used — this matches the
        original split()[-1] fallback behaviour. (The earlier
        commented-out segment-index approach has been removed.)
        """
        return 'gfs/' + request.url.split("/prod/")[-1]


class RadarPlipeline(FilesPipeline):
    """Download radar files into radar/<seg-3>/<seg-2>/<YYYYMMDD>/<file>.

    NOTE(review): the class name keeps the original "Plipeline" typo
    because ITEM_PIPELINES settings reference it by dotted path.
    """

    def get_media_requests(self, item, info):
        # Only radar items are handled by this pipeline.
        if isinstance(item, RadarItem):
            yield Request(item['url'])

    def file_path(self, request, response=None, info=None):
        """Build the storage path from URL segments plus the first
        8-digit run (presumably a YYYYMMDD date — confirm against the
        crawled URLs) found anywhere in the URL.

        Raises AttributeError when the URL contains no 8-digit run,
        same as the original `.group(0)` on a None match.
        """
        url = request.url
        parts = url.split("/")
        # Raw string: '\d' in a plain literal is an invalid escape
        # sequence and warns on recent Python versions.
        date_dir = re.search(r'\d{8}', url).group(0)
        return ('radar/' + parts[-3] + '/' + parts[-2] + '/'
                + date_dir + '/' + parts[-1])
