import os
import time
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, quote

# Configuration
# Browser-like request headers; Tieba serves degraded/blocked pages to
# clients without a plausible User-Agent and Referer.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Accept-Encoding": "gzip, deflate, br",
    "Connection": "keep-alive",
    "Cookie": "你的Cookie",  # placeholder ("your Cookie") — replace with a real session cookie
    "Referer": "https://tieba.baidu.com/"
}
SAVE_DIR = "tieba_images"  # root directory for downloaded images
MAX_PAGES = 1  # number of forum list pages to crawl
REQUEST_INTERVAL = 3  # seconds to sleep between requests (politeness throttle)

def get_page(url):
    """Fetch *url* and return its HTML text, or ``None`` on failure.

    As a debugging aid, the raw response body is also dumped to
    ``tieba_page.html`` (overwritten on every call).

    Args:
        url: Absolute URL to request.

    Returns:
        The decoded page text if the server answered 200, else ``None``.
    """
    try:
        print(f"正在请求: {url}")
        response = requests.get(url, headers=HEADERS, timeout=10)
        # Tieba pages are UTF-8; force it so response.text decodes correctly
        # even when the server omits a charset header.
        response.encoding = 'utf-8'
        print(f"响应状态码: {response.status_code}")

        # Debug aid: keep a copy of the last fetched page for offline inspection.
        with open("tieba_page.html", "w", encoding="utf-8") as f:
            f.write(response.text)
        print("页面内容已保存到 tieba_page.html")

        return response.text if response.status_code == 200 else None
    except (requests.RequestException, OSError) as e:
        # Narrowed from a bare `except Exception`: only network/HTTP errors
        # and the debug-file write are expected to fail here.
        print(f"请求失败: {e}")
        return None

def parse_post_links(html):
    """Extract absolute thread URLs (paths containing ``/p/``) from a list page.

    A forum list page typically links each thread several times (title,
    reply counter, last-reply link, ...), so duplicates are removed while
    preserving first-seen order — otherwise the caller would crawl the
    same thread repeatedly.

    Args:
        html: HTML text of a forum list page.

    Returns:
        List of unique absolute thread URLs.
    """
    soup = BeautifulSoup(html, 'lxml')
    links = []
    seen = set()
    # Generic selector: any anchor whose href contains "/p/" — robust
    # against Tieba renaming its CSS classes.
    for item in soup.select('a[href*="/p/"]'):
        full_url = urljoin("https://tieba.baidu.com", item['href'])
        if full_url in seen:
            continue
        seen.add(full_url)
        print(f"匹配到链接: {full_url}")  # debug output
        links.append(full_url)
    return links

def parse_images(html):
    """Collect absolute image URLs from a thread page.

    Matches regular content images (class ``BDE_Image``) and lazily
    loaded ones (class ``lazy-img``). For lazy images the real URL lives
    in ``data-original`` while ``src`` usually holds a placeholder, so
    ``data-original`` is preferred. Duplicates are dropped, order kept.

    Args:
        html: HTML text of a thread page.

    Returns:
        List of unique image URLs starting with ``http``.
    """
    soup = BeautifulSoup(html, 'lxml')
    images = []
    seen = set()
    for img in soup.find_all('img', {'class': ['BDE_Image', 'lazy-img']}):
        # Fixed priority: the original preferred `src`, which for lazy-img
        # elements can be a same-looking placeholder URL.
        src = img.get('data-original') or img.get('src')
        if src and src.startswith('http') and src not in seen:
            seen.add(src)
            images.append(src)
    return images

def download_image(url, folder):
    """Download one image from *url* into directory *folder*.

    The file name is the last path segment of the URL with any query
    string stripped. Failures are logged rather than raised so a single
    bad image does not abort the whole crawl.

    Args:
        url: Absolute image URL.
        folder: Destination directory (created if missing).
    """
    try:
        headers = HEADERS.copy()
        headers["Referer"] = "https://tieba.baidu.com/"  # Tieba hotlink protection
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            os.makedirs(folder, exist_ok=True)
            # Strip a trailing query string and guard against an empty
            # last segment (URL ending in "/").
            basename = url.split('/')[-1].split('?')[0] or "image.jpg"
            filename = os.path.join(folder, basename)
            with open(filename, 'wb') as f:
                f.write(response.content)
            # Fixed: the original printed the literal text "(unknown)"
            # instead of the saved path.
            print(f"下载成功: {filename}")
    except (requests.RequestException, OSError) as e:
        print(f"下载失败: {e}")

def main():
    """Crawl MAX_PAGES list pages of the target forum and download all images.

    For each list page: collect thread links, fetch each thread, extract
    image URLs and save them under ``SAVE_DIR/<thread_id>/``, sleeping
    REQUEST_INTERVAL seconds between requests to avoid being throttled.
    """
    os.makedirs(SAVE_DIR, exist_ok=True)
    # Loop-invariant hoisted: the forum keyword ("吊图") only needs
    # URL-encoding once, not once per page.
    encoded_kw = quote("吊图", safe='')
    for page in range(MAX_PAGES):
        print(f"正在处理第 {page+1} 页")
        # Tieba paginates the list view in steps of 50 threads (pn offset).
        target_url = f"https://tieba.baidu.com/f?kw={encoded_kw}&pn={page*50}"
        print("实际请求URL:", target_url)

        html = get_page(target_url)
        if not html:
            continue  # fetch failed; try the next page
        post_links = parse_post_links(html)
        print(f"发现 {len(post_links)} 个帖子")
        for link in post_links:
            print(f"正在处理帖子: {link}")
            html_post = get_page(link)
            if html_post:
                images = parse_images(html_post)
                print(f"发现 {len(images)} 张图片")
                # One sub-folder per thread, named by the thread id segment.
                save_folder = os.path.join(SAVE_DIR, link.split('/')[-1])
                for img in images:
                    download_image(img, save_folder)
                    time.sleep(REQUEST_INTERVAL)
            time.sleep(REQUEST_INTERVAL)

# Run the crawler only when executed as a script, not when imported.
if __name__ == "__main__":
    main()








