import requests
# from multiprocessing import Process
from threading import Thread
import re
import time
from retry import retry
import os

# HTTP request headers for e4ftl01.cr.usgs.gov (NASA LP DAAC data pool).
# NOTE(review): the 'Cookie' value is a hardcoded session credential and
# will expire — downloads will start failing (non-200 responses) until it
# is refreshed manually from a logged-in browser session.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Connection': 'keep-alive',
    # 'Cookie': '_ga=GA1.2.1675229666.1610187138; _gid=GA1.2.1180088115.1610187138; DATA=X-mlZCbX5gJi5Gr68v4-SwAAADA',
    'Cookie': '_ga=GA1.2.1675229666.1610187138; DATA=YAaSzp1PdgJjwYzrp11JsgAAAlo',
    'Host': 'e4ftl01.cr.usgs.gov',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'
}


@retry(tries=5, delay=2)
def downloader(url, content_dir):
    """Download one file from ``url`` into ``content_dir``.

    The local filename is '<parent-dir>-<basename>', built from the last
    two path components of the URL. URLs that do not match the expected
    shape (e.g. blank lines from the .url file) are silently skipped.
    Retries up to 5 times with a 2 s delay on any exception, including
    HTTP error statuses surfaced via ``raise_for_status()``.
    """
    # exist_ok avoids the check-then-create race on repeated/retried calls.
    os.makedirs(content_dir, exist_ok=True)
    # Expect URLs shaped like https://host//a/b/c/<dir>/<file>; the last
    # two path components become the local filename.
    file_rule = re.match(r"https://.*//.*/.*/.*/(.*)/(.*)", url)
    if not file_rule:
        return
    sub_dir, base_name = file_rule.groups()
    file_name = os.path.join(content_dir, sub_dir + '-' + base_name)
    print(file_name)
    # Without a timeout, a stalled connection blocks forever and @retry
    # never gets a chance to fire.
    response = requests.get(url, headers=headers, timeout=60)
    print(response.status_code)
    # Raise (and thereby retry) on HTTP errors instead of writing an
    # HTML error page to disk as if it were HDF data.
    response.raise_for_status()
    with open(file_name, 'wb') as f:
        f.write(response.content)


def download(base_dir="C:\\Users\\91481\\Desktop\\hdf_data\\", start_year=2000, end_year=2020):
    """Download every URL listed in per-year '<year>.url' files.

    For each year in ``range(start_year, end_year)``, reads the newline-
    separated URL list from '<year>.url' in the current directory and
    downloads each file into '<base_dir><year>\\'. Defaults preserve the
    original hard-coded path and 2000-2019 year range.

    Parameters:
        base_dir: destination root; must end with a path separator.
        start_year: first year to process (inclusive).
        end_year: last year to process (exclusive).
    """
    for year in range(start_year, end_year):
        # Trailing backslash kept because the destination is later joined
        # with the filename.
        content_dir = base_dir + str(year) + "\\"
        url_file = str(year) + ".url"

        with open(url_file) as f:
            urls = f.read().split("\n")

        for url in urls:
            # split("\n") leaves empty strings for blank/trailing lines.
            if not url.strip():
                continue
            downloader(url, content_dir)
            time.sleep(1)  # throttle so we don't hammer the server


if __name__ == '__main__':
    # Entry point: iterate the configured year range, reading each year's
    # '<year>.url' list and downloading every file sequentially.
    download()
