# -*- coding: utf-8 -*-
import scrapy
import datetime
import os
from ncep.items import CfsItem

# Root of NCEP's production CFS (Climate Forecast System) GRIB2 archive on NOMADS.
base_url = 'https://nomads.ncep.noaa.gov/pub/data/nccf/com/cfs/prod/cfs/'
# Forecast valid hours requested for each forecast day (6-hourly output files).
list_hour = ['00', '06', '12', '18']
#list_hour = ['00','18']
class Cfsv2Spider(scrapy.Spider):
    """Spider that emits download URLs for CFSv2 6-hourly GRIB2 forecast files.

    Rather than scraping links from the NOMADS index page, it constructs the
    expected file names for yesterday's 12z model cycle (ensemble member 01)
    and yields one ``CfsItem`` per file URL.
    """

    name = 'cfs'
    allowed_domains = ['nomads.ncep.noaa.gov']
    start_urls = ['https://nomads.ncep.noaa.gov/pub/data/nccf/com/cfs/prod/cfs/']

    def parse(self, response):
        """Yield a ``CfsItem`` for every flxf/pgbf file of yesterday's 12z run.

        ``response`` is intentionally ignored: the archive follows a fixed
        naming scheme, so the file list is generated, not scraped.

        File name pattern: ``{prefix}{valid_day}{hour}.01.{run_day}12.grb2``
        where prefix is ``flxf`` (surface flux) or ``pgbf`` (pressure-level).
        """
        # Yesterday's 12z cycle is targeted (the most recent complete run).
        run_day = datetime.datetime.today() - datetime.timedelta(days=1)
        str_day = run_day.strftime("%Y%m%d")
        cycle_dir = f'cfs.{str_day}/12/6hrly_grib_01/'

        # Single place the file-name format lives, instead of six copies.
        def _filename(prefix, valid_day, hour):
            return f'{prefix}{valid_day}{hour}.01.{str_day}12.grb2'

        # Run day itself: only the 12z and 18z valid times exist for a 12z run.
        list_filename = [
            _filename(prefix, str_day, hour)
            for prefix in ('flxf', 'pgbf')
            for hour in ('12', '18')
        ]

        # Forecast days 1..30: all four 6-hourly valid times, flxf then pgbf.
        # (timedelta date arithmetic only shifts the date; the time-of-day
        # component of run_day is irrelevant to the %Y%m%d formatting.)
        for day_offset in range(1, 31):
            valid_day = (run_day + datetime.timedelta(days=day_offset)).strftime("%Y%m%d")
            for hour in list_hour:
                list_filename.append(_filename('flxf', valid_day, hour))
                list_filename.append(_filename('pgbf', valid_day, hour))

        for filename in list_filename:
            item = CfsItem()
            item['url'] = base_url + cycle_dir + filename
            yield item
