import requests
# from multiprocessing import Process
from threading import Thread
import re
import url
import time
from retry import retry

# Request headers impersonating a desktop Chrome browser so the USGS data
# server accepts the download.
# NOTE(review): the Cookie appears to carry a login session token (DATA=...);
# such tokens expire, so downloads will start failing once it does — refresh
# it from a logged-in browser session. Confirm against the server's auth flow.
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'keep-alive',
# 'Cookie': '_ga=GA1.2.1675229666.1610187138; _gid=GA1.2.1180088115.1610187138; DATA=X-mlZCbX5gJi5Gr68v4-SwAAADA',
'Cookie': '_ga=GA1.2.1675229666.1610187138; DATA=YAKw6FjMC7@KbcV3jIccCwAAALU',
'Host': 'e4ftl01.cr.usgs.gov',
# 'sec-ch-ua': '"Google Chrome";v="87", " Not;A Brand";v="99", "Chromium";v="87"',
# 'sec-ch-ua-mobile': '?0',
# 'Sec-Fetch-Dest': 'document',
# 'Sec-Fetch-Mode': 'navigate',
# 'Sec-Fetch-Site': 'none',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'
}


@retry(tries=5, delay=2)
def downloader(url):
    """Download one granule from `url` and save it in the working directory.

    The local file name is "<dir>-<file>", built from the last two path
    segments of the URL (e.g. "2019.12.01-MOD13A3....hdf"). Retries up to
    5 times with a 2 s delay (via @retry) on any exception, including HTTP
    error statuses raised below.

    Raises:
        ValueError: if the URL does not match the expected server layout.
        requests.HTTPError: on a non-2xx response (triggers a retry).
    """
    # Capture the last two path segments; the pattern expects the double
    # slash after the host, as in the sample url_demo.
    file_rule = re.match(r"https://.*//.*/.*/.*/(.*)/(.*)", url)
    if file_rule is None:
        # Fail loudly instead of an opaque AttributeError on .groups()
        raise ValueError("URL does not match expected layout: " + url)
    file_name = file_rule.groups()[0] + '-' + file_rule.groups()[1]

    # Stream to disk so a large granule is never held fully in memory, and
    # raise on HTTP errors so a 403/expired-cookie page is not saved as a
    # .hdf file (raising also lets @retry do its job). The timeout keeps a
    # dead connection from stalling the worker thread forever.
    with requests.get(url, headers=headers, stream=True, timeout=60) as response:
        response.raise_for_status()
        with open(file_name, 'wb') as f:
            for chunk in response.iter_content(chunk_size=1 << 20):
                f.write(chunk)


# Sample granule URL. Note the double slash after the host name — the
# regex in downloader() relies on it to locate the path segments.
url_demo = "https://e4ftl01.cr.usgs.gov//MODV6_Cmp_B/MOLT/MOD13A3.006/2019.12.01/MOD13A3.A2019335.h27v05.006.2020004221619.hdf"

# x = re.match(r"https://.*//.*/.*/.*/(.*)/(.*)", url_demo)


# Worker threads started by create_process(); joined there before returning.
process_list = []

# URLs whose worker thread could not be created/started.
err_urls = []

def create_process(mode=1):
    """Start one download thread per URL and wait for all of them.

    Args:
        mode: truthy -> fetch the full list via url.get_url_list();
              falsy  -> fetch the filtered list via url.filter().

    Side effects:
        Appends started threads to module-level `process_list` and any URL
        whose thread failed to start to module-level `err_urls`.
    """
    if mode:
        urls = url.get_url_list()
    else:
        urls = url.filter()

    # Despite the function's name, workers are threads (I/O-bound downloads
    # release the GIL while blocked on the network).
    for target_url in urls:
        try:
            worker = Thread(target=downloader, args=(target_url,))
            worker.start()
            # Stagger thread start-up so the server isn't hit all at once.
            time.sleep(1)
            process_list.append(worker)
        except Exception:
            # Record the failure but keep launching the remaining downloads.
            # (Bare `except:` would also swallow KeyboardInterrupt.)
            err_urls.append(target_url)

    # Block until every started download has finished.
    for worker in process_list:
        worker.join()



if __name__ == "__main__":
    # Alternative: run the full threaded batch instead of a single file.
    # create_process(mode=0)
    # print(err_urls)

    # Smoke test: fetch just the first URL from the filtered list.
    first_url = url.filter()[0]
    downloader(first_url)