
import os
import random
import time
import requests
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
from urllib.parse import urlparse
from tenacity import retry, stop_after_attempt, wait_fixed

from weibo_video_fetcher_new import WeiboVideoFetcher

from tqdm import tqdm

# Root download folder ("Thunder downloads" on this Windows machine);
# every run creates its own timestamped subfolder beneath it.
BASE_PATH = r'D:\迅雷下载'
# Bytes per chunk when streaming an HTTP response body to disk.
CHUNK_SIZE = 8192

def create_timestamp_dir(folder_prefix):
    """Create (if missing) BASE_PATH/<folder_prefix>_<timestamp>.

    Returns:
        tuple[str, str]: the directory path and the timestamp string,
        which callers reuse as a filename prefix.
    """
    stamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    target_dir = os.path.join(BASE_PATH, f"{folder_prefix}_{stamp}")
    os.makedirs(target_dir, exist_ok=True)
    return target_dir, stamp

@retry(stop=stop_after_attempt(3), wait=wait_fixed(2))
def download_image(url, save_dir, timestamp, index, headers, cookie, type):
    """Download a single image into save_dir.

    Tries first with the Weibo headers/cookies; if that attempt is not a
    200, falls back to a plain unauthenticated request. On failure the
    exception is re-raised so tenacity retries up to 3 times, 2s apart.

    Args:
        url: image URL (may carry surrounding whitespace; stripped here).
        save_dir: destination directory (must already exist).
        timestamp: run timestamp used as the filename prefix.
        index: position of this URL in the batch, zero-padded in the name.
        headers: HTTP headers for the authenticated first attempt.
        cookie: cookies for the authenticated first attempt.
        type: unused; kept only for caller compatibility. NOTE(review):
            shadows the builtin `type` — worth renaming at the call sites.
    """
    response = None
    try:
        # Random delay so concurrent workers don't hammer the server.
        time.sleep(random.uniform(0.5, 2))
        print(f"start download 1 {url.strip()}")
        response = requests.get(url.strip(), headers=headers, cookies=cookie, timeout=5)
        print(f"end download 1 {response.status_code}")
        if response.status_code != 200:
            # Fallback: some CDN URLs only work WITHOUT the session headers.
            print(f"start download 2 {url.strip()}")
            response = requests.get(url.strip(), timeout=5)
            print(f"end download 2 {response.status_code}")
        if response.status_code == 200:
            # Keep the URL's extension when present, default to .jpg.
            ext = os.path.splitext(urlparse(url).path)[1] or '.jpg'
            filename = f"{timestamp}_{index:04d}{ext}"
            with open(os.path.join(save_dir, filename), 'wb') as f:
                f.write(response.content)
            # Fixed: previously printed the literal "(unknown)" instead of
            # the saved file's name.
            print(f"下载成功: {filename}")
        else:
            print(f"下载失败 {url}, {response.status_code if response else -1}")
    except Exception as e:
        print(f"下载失败 {url}, {response.status_code if response else -1}: {str(e)}")
        raise  # re-raise so tenacity's @retry fires again

@retry(stop=stop_after_attempt(3), wait=wait_fixed(2))
def download_file_instream(url, save_dir, timestamp, index, headers, cookie, total_len):
    """Stream-download one file, anonymously first, then with credentials.

    Sleeps a random 0.5-2s, then calls down_by_url without headers; if the
    status is not 200, retries once passing the Weibo headers/cookies.
    Any exception is logged and re-raised so tenacity can retry (3x, 2s).
    """
    time.sleep(random.uniform(0.5, 2))

    try:
        status = down_by_url(url, save_dir, timestamp, index, None, None, total_len)
        if status != 200:
            down_by_url(url, save_dir, timestamp, index, headers, cookie, total_len)
    except Exception:
        print(f"下载失败 {url}")
        raise

def down_by_url(url, save_dir, timestamp, index, headers, cookie, total_len):
    """Stream one URL to disk and return the HTTP status code.

    On 200, the body is written in CHUNK_SIZE chunks to
    save_dir/<timestamp>_<index:04d><ext> while a tqdm bar tracks
    progress. On any other status nothing is written; the status is
    returned so the caller can decide whether to retry with credentials.
    """
    with requests.get(url.strip(), headers=headers, cookies=cookie, stream=True, timeout=10) as r:
        print(f"第{index + 1}个/共{total_len}个. {r.status_code}")
        if r.status_code == 200:
            # content-length may be absent; tqdm then shows an open-ended bar.
            total_size = int(r.headers.get('content-length', 0))

            ext = os.path.splitext(urlparse(url).path)[1] or '.jpg'
            filename = f"{timestamp}_{index:04d}{ext}"
            # Fixed: use context managers so the tqdm bar is closed
            # (it previously leaked on every call, corrupting the console
            # output of concurrent workers even after the download ended).
            with tqdm(total=total_size, unit='B', unit_scale=True,
                      desc=f"第{index + 1}个/共{total_len}个") as progress, \
                 open(os.path.join(save_dir, filename), 'wb') as f:
                for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
                    if chunk:  # skip keep-alive empty chunks
                        f.write(chunk)
                        progress.update(len(chunk))

        return r.status_code




def batch_download(txt_file='result.txt', folder_prefix="video", max_workers=10):
    """Read URLs from txt_file (one per line) and download them concurrently.

    Creates a fresh timestamped folder under BASE_PATH, loads the Weibo
    headers/cookies from header.txt, and fans the downloads out over a
    thread pool. future.result() re-raises the first worker exception
    (after tenacity's retries are exhausted) in this thread.

    Args:
        txt_file: path to the newline-separated URL list.
        folder_prefix: prefix for the timestamped destination folder.
        max_workers: size of the download thread pool.
    """
    # Explicit encoding: the platform default (e.g. cp936 on Chinese
    # Windows) could mis-decode non-ASCII URLs.
    with open(txt_file, 'r', encoding='utf-8') as f:
        urls = [line for line in f if line.strip()]

    if not urls:  # empty list — don't create a useless folder
        return
    save_dir, timestamp = create_timestamp_dir(folder_prefix)

    obj = WeiboVideoFetcher('header.txt')
    headers, cookie = obj.header, obj.cookies

    print(f"start {save_dir} download")
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [
            executor.submit(download_file_instream, url, save_dir, timestamp,
                            idx, headers, cookie, len(urls))
            for idx, url in enumerate(urls)
        ]
        # Surface any download failure here instead of losing it silently.
        for future in futures:
            future.result()

if __name__ == '__main__':
    # Default run: read URLs from result.txt into a new "video_*" folder.
    batch_download()
