# -*- coding: utf-8 -*-
import scrapy
import datetime
import os
from ncep.items import GfsItem

# Root of the NCEP NOMADS "prod" directory that hosts operational GFS runs.
base_url = 'https://nomads.ncep.noaa.gov/pub/data/nccf/com/gfs/prod/'

# NOTE(review): list_day is only referenced by commented-out code in
# Gfs18Spider.parse (the spider derives the day from "yesterday" instead);
# kept in case other modules import it — confirm before removing.
list_day = ['20200322']
# Model run hours (UTC) to fetch; the full set would be 00/06/12/18.
list_hour = ['18', ] # , '00', '12', '06', '18'

class Gfs18Spider(scrapy.Spider):
    """Spider that emits download URLs for yesterday's GFS model runs.

    For each run hour in ``list_hour`` it yields one :class:`GfsItem` per
    0.25-degree GRIB2 forecast file: hourly steps f000-f120, plus 3-hourly
    steps f123-f240 when the run hour is '12'.
    """

    name = 'gfs18'
    allowed_domains = ['nomads.ncep.noaa.gov']
    start_urls = ['http://nomads.ncep.noaa.gov/']

    def parse(self, response):
        """Yield GfsItem objects carrying the GRIB file URLs.

        The response body itself is not inspected; the single start
        request only serves to trigger URL generation.
        """
        # Use yesterday's run: the current day's data may not be fully
        # published on NOMADS yet.
        yesterday = datetime.datetime.today() - datetime.timedelta(days=1)
        str_day = datetime.datetime.strftime(yesterday, "%Y%m%d")
        for str_hour in list_hour:
            run_dir = f'gfs.{str_day}/{str_hour}/atmos/'
            # Hourly forecast steps f000 .. f120.
            for step in range(121):
                yield self._make_item(run_dir, str_hour, step)
            # The 12z run additionally provides 3-hourly steps out to f240.
            if str_hour == '12':
                for step in range(123, 241, 3):
                    yield self._make_item(run_dir, str_hour, step)

    def _make_item(self, run_dir, str_hour, step):
        """Build a GfsItem whose 'url' points at one pgrb2 0.25-deg file."""
        filename = f'gfs.t{str_hour}z.pgrb2.0p25.f{step:03d}'
        item = GfsItem()
        item['url'] = base_url + run_dir + filename
        return item
