import requests
from lxml import html  # 添加在文件顶部
import os
import re

class TiebaSpider:
    """Crawl a Baidu Tieba forum list page and download post images.

    Workflow: ``run()`` fetches the forum page, extracts thread links,
    then for each thread downloads every ``BDE_Image`` attachment into
    ``self.image_dir``.
    """

    def __init__(self, main_url):
        """Store the target forum URL and prepare the image directory.

        Args:
            main_url: URL of the tieba forum list page to crawl.
        """
        self.main_url = main_url
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36...'
        }
        self.image_dir = 'tieba_images'
        # exist_ok=True: safe to re-run against an existing directory.
        os.makedirs(self.image_dir, exist_ok=True)
        print(f"=== 爬虫初始化完成 ===")
        print(f"目标贴吧：{main_url}")
        print(f"图片存储路径：{os.path.abspath(self.image_dir)}")

    def _get_post_links(self):
        """Return the thread links ('/p/...') found on the forum page."""
        response = requests.get(self.main_url, headers=self.headers)
        # Parse with lxml XPath (the thread list node id is "thread_list").
        tree = html.fromstring(response.content)
        # The anchor list also contains user/ad links; keep only post links.
        links = [
            href for href in tree.xpath('//*[@id="thread_list"]/li//a/@href')
            if href.startswith('/p/')
        ]
        # BUG FIX: this status line used to sit *after* the return (never
        # executed) and referenced an undefined name `links`.
        print(f"=== 获取到有效帖子链接 {len(links)} 个 ===")
        return links

    def _process_post(self, post_url):
        """Fetch a single thread page and download its inline images.

        Args:
            post_url: relative thread path, e.g. '/p/1234567890'.
        """
        # Guard against malformed/too-short paths before building the URL.
        if not post_url.startswith('/p/') or len(post_url) < 10:
            print(f"跳过无效链接：{post_url}")
            return
        full_url = f'https://tieba.baidu.com{post_url}' 
        print(f"\n正在处理帖子：{full_url}")
        
        response = requests.get(full_url, headers=self.headers)
        tree = html.fromstring(response.content)
        
        # Tieba wraps post bodies in <cc>; user-uploaded images carry the
        # "BDE_Image" class, which filters out avatars and UI sprites.
        img_urls = tree.xpath('//cc//img[@class="BDE_Image"]/@src')
        
        for idx, img_url in enumerate(img_urls, 1):
            print(f"  图片{idx}: {img_url}")
            self._download_image(img_url)

    def _download_image(self, url):
        """Download one image URL into self.image_dir; log success/failure.

        Any failure (network error, timeout, or a URL that does not match
        the expected 'sign=...jpg' pattern) is caught and reported rather
        than aborting the crawl.
        """
        try:
            img_data = requests.get(url, headers=self.headers, timeout=10).content
            # Derive a short, mostly-unique name from the CDN 'sign' token;
            # raises AttributeError (caught below) if the pattern is absent.
            filename = re.search(r'sign=(.*?)\.jpg', url).group(1)[-10:] + '.jpg'
            with open(os.path.join(self.image_dir, filename), 'wb') as f:
                f.write(img_data)
            # BUG FIX: previously printed the literal text "(unknown)"
            # instead of the saved filename.
            print(f"✓ 下载成功：{filename}")
        except Exception as e:
            print(f"✗ 下载失败：{os.path.basename(url)} | 原因：{str(e)}")

    def run(self):
        """Entry point: crawl the forum page and process the first 10 posts."""
        print("\n=== 开始爬取 ===")
        post_links = self._get_post_links()
        # Cap at 10 threads to keep test runs short.
        for link in post_links[:10]:
            self._process_post(link)
        print("\n=== 任务完成 ===")

if __name__ == '__main__':
    # Target forum: the URL-encoded kw parameter selects the tieba to crawl.
    target_forum = 'https://tieba.baidu.com/f?kw=%E5%90%8A%E5%9B%BE'
    TiebaSpider(target_forum).run()


















