# "http://124.220.2.58/source/books/zips/\(id).zip"
import requests, json, os
from tqdm import tqdm
from urllib.parse import urlparse

# Streaming HTTP download helper built on requests.get(stream=True).
def download_file(url, save_path=None, chunk_size=1024, overwrite=False, show_progress=True, timeout=30):
    """
    Download a file over HTTP with requests, streaming it to disk in chunks.

    Note: resumable (ranged) downloads are NOT implemented; an existing file
    is either kept as-is or (with ``overwrite=True``) fully re-downloaded.

    Args:
        url (str): URL to download.
        save_path (str, optional): Destination path. When None, the filename
            is derived from the URL path (falling back to "downloaded_file").
        chunk_size (int, optional): Bytes per streamed chunk.
        overwrite (bool, optional): Re-download even if the file already exists.
        show_progress (bool, optional): Show a tqdm progress bar.
        timeout (int, optional): Per-request timeout in seconds; prevents the
            call from hanging forever on a stalled connection.

    Returns:
        str: Path of the downloaded file, or None on failure.
    """
    progress_bar = None
    try:
        # Derive a filename from the URL when no save_path was given.
        if save_path is None:
            parsed_url = urlparse(url)
            filename = os.path.basename(parsed_url.path) or "downloaded_file"
            save_path = os.path.join(os.getcwd(), filename)

        # Skip (or remove) an already-downloaded file.
        if os.path.exists(save_path):
            if overwrite:
                os.remove(save_path)
            else:
                file_size = os.path.getsize(save_path)
                print(f"文件已存在: {save_path} ({file_size/1024/1024:.2f} MB)")
                return save_path

        # Create the destination directory; exist_ok avoids a check-then-create race.
        save_dir = os.path.dirname(save_path)
        if save_dir:
            os.makedirs(save_dir, exist_ok=True)

        # Stream the body and take Content-Length from the GET response itself
        # instead of a separate HEAD request (requests does not follow
        # redirects for HEAD by default, and HEAD doubles the round-trips).
        with requests.get(url, stream=True, timeout=timeout) as response:
            response.raise_for_status()
            total_size = int(response.headers.get('content-length', 0))

            if show_progress:
                progress_bar = tqdm(
                    total=total_size,
                    unit='B',
                    unit_scale=True,
                    unit_divisor=1024,
                    desc=f"下载中: {os.path.basename(save_path)}"
                )

            with open(save_path, 'wb') as out_file:
                for chunk in response.iter_content(chunk_size=chunk_size):
                    if chunk:  # filter out keep-alive empty chunks
                        out_file.write(chunk)
                        if progress_bar is not None:
                            progress_bar.update(len(chunk))

        # Sanity-check the downloaded size against the advertised one.
        downloaded_size = os.path.getsize(save_path)
        if total_size and downloaded_size != total_size:
            print(f"警告: 文件大小不匹配。预期: {total_size} 字节, 实际: {downloaded_size} 字节")

        return save_path

    except requests.exceptions.RequestException as e:
        print(f"下载失败: {e}")
        return None
    except Exception as e:
        print(f"发生未知错误: {e}")
        return None
    finally:
        # Close the bar even when the download raises mid-transfer,
        # otherwise tqdm leaves a dangling line / leaked handle.
        if progress_bar is not None:
            progress_bar.close()


with open("/Users/taobo/Desktop/bookLists.json","r") as f:
    infos = json.load(f)
    for book_cata in infos.get("data"):
        cata = book_cata.get("title")
        for book in book_cata.get("items"):
            id = book.get("id")
            url = f"http://124.220.2.58/source/books/zips/{id}.zip"
            name = cata + "_" + book_cata.get("title") + "_" + id
            print(name, url)
            download_file(url=url,save_path=f"/Users/taobo/Desktop/dics/{name}.zip")