import time
import requests,json
import math
from concurrent.futures import ThreadPoolExecutor
import threading

# id='16483302'
# def get_data(id,page):
#     url = f"https://tuchong.com/rest/2/sites/{id}/posts?count=50&page={page}"
#     payload = {}
#     headers={"user-agent":"Mozilla/5.0(Windows NT 10.0;Win64;x64) AppleWebKit/537.36 (KHTML,like Gecko)Chrome/127.0.0.0Safari/537.36"}
#     response = requests.request("GET", url, headers=headers, data=payload)
#     if response.status_code == 200:
#         return json.loads(response.text)
#     else:
#         return None
#
# nums = get_data(id,page=1)['counts']
# counts = math.ceil(nums /50)
# pages_per_thread = counts // 4
# # counts=1
# urls=[]
#
# def get_url(page_start,page_end):
#     for page in range(page_start,page_end+1):
#         res = get_data(id, page)['post_list']
#         for r in res:
#             urls.append(r['images'][0]['source']['f'])
#         print(f'第{page}页url获取完成！')


def down_pic(url):
    """Download one image to E:/TXT/pic/, named after the URL's last path segment.

    Streams the response in 8 KiB chunks so large images are never held
    fully in memory. Failures are reported and swallowed so one bad URL
    does not abort the whole batch.

    Args:
        url: Direct image URL.
    """
    try:
        # timeout=(connect, read): without it a stalled connection hangs forever
        response = requests.get(url, stream=True, timeout=(5, 30))
        response.raise_for_status()  # 检查状态码 (raise on HTTP 4xx/5xx)
        # Drop any query string — '?' is illegal in Windows filenames.
        file_path = f"E:/TXT/pic/{url.split('/')[-1].split('?')[0]}"
        with open(file_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
        print(f"下载完成：{url}")
    except requests.RequestException as e:
        print(f"下载失败：{url}，错误：{e}")
    except OSError as e:
        # Disk/path errors (missing directory, full disk) previously crashed the run.
        print(f"下载失败：{url}，错误：{e}")

urls=[]
def read_url(path='E:/TXT/pic/urls.txt'):
    """Load image URLs from a text file, one URL per line.

    Builds a fresh list on every call instead of appending into the module
    global — the original accumulated duplicates if called more than once.
    Blank lines are skipped so no empty-string "URL" reaches down_pic().

    Args:
        path: File to read; defaults to the crawl output location.

    Returns:
        list[str]: Whitespace-stripped, non-empty URLs in file order.
    """
    result = []
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            url = line.strip()
            if url:
                result.append(url)
    return result


if __name__ == '__main__':
    # NOTE: the one-off crawl that split the page range across four threads
    # and wrote the collected URLs to urls.txt has already been run; its
    # output file is what read_url() loads below.

    download_list = read_url()

    # Download sequentially, pausing one second between requests
    # to stay polite to the server.
    for link in download_list:
        down_pic(link)
        time.sleep(1)

    # Alternative: concurrent download with a small thread pool.
    # with ThreadPoolExecutor(max_workers=5) as exe:
    #     for link in download_list:
    #         exe.submit(down_pic, link)