import requests
from bs4 import BeautifulSoup
import threading
import os

# Crawl parameters derived from the student ID:
#   max_count — total number of images to download (last 3 digits of the ID)
#   page      — number of search-result pages to crawl (last digit of the ID)
student_id = '102302148'
max_count = int(student_id[-3:])
page = int(student_id[-1])
count = 0  # shared download counter, guarded by `lock`

# Serializes access to `count` across the per-page crawler threads.
lock = threading.Lock()

# Ensure the image output directory exists (no-op if already present).
os.makedirs('book_images', exist_ok=True)
def get_html(url):
    """Fetch *url* and return its decoded HTML text, or None on failure.

    Dangdang search pages are GBK-encoded, so the response encoding is
    forced before decoding.  HTTP error statuses (4xx/5xx) are treated
    as failures instead of returning the error page's HTML.
    """
    try:
        res = requests.get(url, timeout=10)
        res.raise_for_status()  # don't hand an error page to the parser
        res.encoding = 'gbk'
        return res.text
    except requests.RequestException as e:
        print(f"获取页面失败: {e}")
        return None

def load_picture(data, page_num):
    """Parse one search-result page and download its product images.

    Extracts candidate image URLs from ``data`` (HTML text), filters out
    placeholders/logos/QR codes, and downloads until the shared global
    ``count`` reaches ``max_count``.  The counter is claimed under ``lock``
    in a single critical section so concurrent page threads never take the
    same slot or overshoot the limit.
    """
    global count

    if not data:
        print("页面数据为空，跳过")
        return

    soup = BeautifulSoup(data, 'lxml')
    for img in soup.find_all('img'):
        # Many <img> tags carry only a placeholder in 'src'; the real
        # URL lives in 'data-original'.
        img_src = img.get('data-original')
        if (not img_src or
                'url_none.png' in img_src or
                'erweima' in img_src or
                'validate.gif' in img_src or
                'logo' in img_src.lower() or
                'icon' in img_src.lower()):
            continue

        if not img_src.startswith('http'):
            img_src = 'https:' + img_src

        # Atomically claim the next slot; stop once the quota is filled.
        # (The original checked and incremented in two separate lock
        # acquisitions and could fall through with `current_count`
        # unbound after the limit was reached.)
        with lock:
            if count >= max_count:
                return
            count += 1
            current_count = count

        try:
            response = requests.get(img_src, timeout=10)
            response.raise_for_status()  # don't save an error page as a .jpg
            filename = f'book_images/image_{current_count}.jpg'
            with open(filename, 'wb') as f:
                f.write(response.content)
            print(f"第{page_num}页下载第{current_count}张图片:{img_src}")
        except Exception:
            # Best-effort download: report the failure and move on.
            print(f"第{page_num}页第{current_count}张图片下载失败")
def crawl_page(page_num):
    """Fetch one Dangdang search-result page and download its images."""
    url = (
        'https://search.dangdang.com/'
        f'?key=%CA%E9%B0%FC%D0%A1%D1%A7%C9%FA%C4%D0&act=input&page_index={page_num}'
    )
    print(f"正在爬取第{page_num}页...")
    load_picture(get_html(url), page_num)
def main():
    """Spawn one crawler thread per result page and wait for them all."""
    print("开始爬取书包图片...")
    # max_count is derived from the student ID (last three digits → 148),
    # so report the computed value instead of a hardcoded literal.
    print(f"目标数量: {max_count}张")
    print(f"目标页面: {page}")

    threads = []
    for i in range(1, page + 1):
        t = threading.Thread(target=crawl_page, args=(i,))
        threads.append(t)
        t.start()

    # Block until every page thread has finished before reporting totals.
    print("等待所有线程完成...")
    for t in threads:
        t.join()

    print(f"\n爬取完成！总共找到{count}张图片")

# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
