import random
import time

import requests
from lxml import etree
from urllib import parse


# Incrementally crawl all images from a Baidu Tieba forum.
class TieBaImagesSpider:
    """Download every post image from a Baidu Tieba forum, page by page."""

    def __init__(self):
        # Forum list page template: placeholders are the URL-quoted forum
        # name and the `pn` offset (Tieba paginates by 50 threads per page).
        self.url = 'https://tieba.baidu.com/f?ie=utf-8&kw={}&pn={}'
        # Thread links scraped from the list page are site-relative
        # (e.g. /p/8803059197), so prefix the host to build the full URL.
        self.two_url = 'https://tieba.baidu.com{}'
        # NOTE: Tieba serves different markup depending on the user agent;
        # this old MSIE string yields the plain HTML the xpaths below expect.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0'}

    def get_html(self, url):
        """Fetch one forum list page and crawl every thread linked from it."""
        # timeout keeps a stalled connection from hanging the crawler forever.
        one_html = requests.get(url=url, headers=self.headers, timeout=10).text
        p = etree.HTML(one_html)
        # Relative thread links such as /p/8803059197. ("cleafix" is Baidu's
        # own typo in the class attribute, not ours — do not "fix" it.)
        link_list = p.xpath('//div[@class="t_con cleafix"]/div/div/div/a/@href')
        for link in link_list:
            self.get_image(link)

    @staticmethod
    def _make_filename(img_url):
        """Derive a safe local filename from an image URL.

        Uses the last path component (query string stripped) and appends
        '.png' only when the URL carries no file extension; the previous
        blind 10-char tail slice could collide and mislabeled .jpg files.
        """
        name = img_url.rsplit('/', 1)[-1].split('?', 1)[0]
        if '.' not in name:
            name += '.png'
        return name

    def get_image(self, url):
        """Fetch one thread page and download every post image found in it."""
        html = requests.get(url=self.two_url.format(url), headers=self.headers,
                            timeout=10).text
        p = etree.HTML(html)
        img_list = p.xpath('//div[@class="d_post_content j_d_post_content  clearfix"]/img[@class="BDE_Image"]/@src')
        for img in img_list:
            # Download the image bytes themselves.
            data = requests.get(url=img, headers=self.headers, timeout=10).content
            filename = self._make_filename(img)
            with open(filename, 'wb') as f:
                f.write(data)
            print(filename, '下载成功')
            # Polite random delay so we do not hammer the server.
            time.sleep(random.randint(1, 2))

    def run(self):
        """Interactive entry point: prompt for forum name and page range."""
        name = input('请输入贴吧名')
        name = parse.quote(name)
        start = int(input('请输入起始页'))
        end = int(input('请输入终止页'))
        for page in range(start, end + 1):
            # Page 1 -> pn=0, page 2 -> pn=50, ... (50 threads per page).
            pn = (page - 1) * 50
            url = self.url.format(name, pn)
            self.get_html(url)


if __name__ == '__main__':
    # Script entry point: build the spider and start the interactive crawl.
    TieBaImagesSpider().run()
