import os
import time
import requests
import re


# Configuration
HEADERS = {
    # Desktop-Chrome User-Agent so Tieba serves the regular HTML markup.
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.9",
}
SAVE_DIR = "tieba_images"  # local directory all downloaded images go into
MAX_PAGES = 1              # number of board index pages to crawl (50 posts per page)
REQUEST_INTERVAL = 3       # seconds to sleep between posts, to be polite to the server

# Fetch the HTML of a Tieba page (board index or individual post).
def get_page(url):
    """Fetch *url* and return its HTML text, or None on any failure.

    Sends the shared HEADERS and forces UTF-8 decoding (Tieba pages are
    UTF-8 but the response may not advertise it). A non-200 status also
    yields None so callers only have to check for truthiness.
    """
    try:
        print(f"正在请求: {url}")
        response = requests.get(url, headers=HEADERS, timeout=10)
        response.encoding = 'utf-8'
        print(f"响应状态码: {response.status_code}")
        return response.text if response.status_code == 200 else None
    # Catch only request-level failures (DNS, timeout, connection reset);
    # a bare `except Exception` would also hide genuine programming errors.
    except requests.RequestException as e:
        print(f"请求失败: {e}")
        return None

# Extract the link of every post from the board index HTML.
def parse_post_links(html):
    """Return the relative href ('/p/...') of every thread on a board page."""
    title_div = re.compile(
        '<div class="threadlist_title pull_left j_th_tit  member_thread_title_frs ">'
        '.*?href="(.*?)".*?</div>',
        re.S,  # DOTALL: the title div spans multiple lines in the raw HTML
    )
    return title_div.findall(html)

def parse_images(html):
    """Extract image URLs from a post page.

    Tieba wraps post content in <cc>...</cc>; this captures the first
    src attribute inside each such block.
    """
    # BUG FIX: the trailing delimiter used to be '<cc>' (an *opening* tag),
    # so a block only matched when ANOTHER <cc> followed it — images in the
    # last (or only) content block were silently dropped. Close with '</cc>'.
    pattern = r'<cc>.*?src="(.*?)".*?</cc>'
    return re.findall(pattern, html, re.S)

# Takes an image URL, an index, and a per-post path prefix; the file is saved
# as '<save_folder>-<num>.jpg' (note: no per-post directory is created).
def download_image(url, num, save_folder):
    """Download one image and save it as '<save_folder>-<num>.jpg'.

    Args:
        url: direct image URL (http/https).
        num: index used to make the filename unique within a post.
        save_folder: per-post path prefix (e.g. 'tieba_images/123456');
            it is a filename prefix, not a directory.

    Errors are logged and swallowed so one bad image cannot abort the crawl.
    """
    img_path = save_folder + '-' + str(num) + '.jpg'
    # BUG FIX: the request and raise_for_status() used to sit OUTSIDE the
    # try block, so any network error or non-2xx status crashed the whole
    # program (the caller does not catch). Keep them inside the handler.
    try:
        response = requests.get(url, headers=HEADERS, timeout=10)
        response.raise_for_status()  # turn non-2xx responses into errors
        with open(img_path, 'wb') as f:
            f.write(response.content)
        print(f"下载成功: {img_path}")
    except (requests.RequestException, OSError) as e:
        print(f"下载失败: {e}")


def main():
    """Crawl the Tieba board page by page and download every post image."""
    os.makedirs(SAVE_DIR, exist_ok=True)
    for page in range(MAX_PAGES):
        print(f"正在处理第 {page+1} 页")
        # Pre-encoded board name ('%E5%90%8A%E5%9B%BE'); pn paginates 50 posts per page.
        target_url = f"https://tieba.baidu.com/f?&kw=%E5%90%8A%E5%9B%BE&ie=utf-8&pn={page*50}"
        print("实际请求URL:", target_url)
        html = get_page(target_url)
        if not html:
            continue  # board page failed to load; try the next page

        post_links = parse_post_links(html)
        print(f"发现 {len(post_links)} 个帖子")

        for link in post_links:
            # Links from the board page are relative ('/p/<id>').
            print(f"正在处理帖子: {link}")
            every_url = 'https://tieba.baidu.com' + link
            html_post = get_page(every_url)
            if html_post:
                images = parse_images(html_post)
                print(f"发现 {len(images)} 张图片")
                # Filename prefix per post: SAVE_DIR/<post id>; images are
                # written as '<prefix>-<i>.jpg' by download_image().
                save_folder = os.path.join(SAVE_DIR, link.split('/')[-1])
                for i, img_url in enumerate(images):
                    if img_url.startswith('http'):  # skip relative/placeholder srcs
                        download_image(img_url, i, save_folder)
            time.sleep(REQUEST_INTERVAL)  # throttle between posts


# Run the crawler only when executed as a script, not on import.
if __name__ == "__main__":
    main()




    
    
   