import requests
from lxml import etree
import time
import random
from hashlib import sha1
from fake_useragent import UserAgent

class TiebaSpider:
    """Crawler that downloads every image (and embedded video) from the
    posts of a Baidu Tieba forum, over a user-chosen page range.

    Flow: crawl() -> parse_html() per listing page -> get_all_images()
    per post -> get_one_image() per media URL. Files are saved in the
    current directory, named by the SHA-1 of their URL (de-duplicates
    repeated media and avoids unsafe characters from the URL).
    """

    # Image extensions we recognise; anything else matched by the XPath is
    # an embedded video (@data-video) and is saved as .mp4.
    IMAGE_EXTS = ('.jpg', '.jpeg', '.png', '.gif', '.bmp')

    def __init__(self):
        # Listing-page URL template: kw = forum name, pn = record offset
        # (50 posts per page).
        self.url = 'http://tieba.baidu.com/f?kw={}&pn={}'

    def _headers(self):
        """Build request headers with a randomized User-Agent.

        Shared by get_html() and get_one_image() so the header logic lives
        in one place.
        """
        return {'User-Agent': UserAgent(path='fake_useragent.json').random}

    def get_html(self, url):
        """Fetch *url* and return the response body as decoded text.

        A timeout is set so a stalled connection cannot hang the crawler.
        """
        html = requests.get(url=url, headers=self._headers(), timeout=10).text

        return html

    def xfunc(self, html, x):
        """Parse *html* and return the list of matches for XPath *x*."""
        eobj = etree.HTML(html)
        r_list = eobj.xpath(x)

        return r_list

    def sha1_url(self, url):
        """Return the 40-char SHA-1 hex digest of *url*.

        Used as a stable, filesystem-safe filename stem for downloads.
        """
        s = sha1()
        s.update(url.encode())

        return s.hexdigest()

    def parse_html(self, url):
        """Extract post links from one forum listing page and crawl each."""
        first_html = self.get_html(url)
        # The post list is wrapped in HTML comments in the raw response;
        # strip the comment markers so the XPath can see it.
        first_html = first_html.replace('<!--', '').replace('-->', '')
        # NOTE: "cleafix" is Baidu's own class-name typo, not ours.
        first_x = '//div[@class="t_con cleafix"]/div/div/div/a/@href'
        # href_list: relative links like ['/p/xxx', '/p/xxx', ...]
        href_list = self.xfunc(first_html, first_x)
        for href in href_list:
            # Fetch every piece of media in this post.
            self.get_all_images(href)

    def get_all_images(self, href):
        """Download every image/video found inside one post (*href* is a
        site-relative link such as '/p/12345')."""
        second_url = 'http://tieba.baidu.com' + href
        second_html = self.get_html(second_url)
        # BDE_Image = in-post images; @data-video = embedded video streams.
        second_x = '//cc//img[@class="BDE_Image"]/@src | //embed/@data-video'
        # img_url_list: absolute media URLs
        img_url_list = self.xfunc(second_html, second_x)
        for img_url in img_url_list:
            # Save one piece of media at a time.
            self.get_one_image(img_url)
            # Random delay to avoid hammering the server / getting blocked.
            time.sleep(random.uniform(0, 1))

    def get_one_image(self, img_url):
        """Download one media URL and write it to disk.

        The filename is the SHA-1 of the URL plus an extension derived
        from the URL itself; URLs with no recognised image extension are
        embedded videos and get '.mp4'.
        """
        img_html = requests.get(url=img_url,
                                headers=self._headers(),
                                timeout=10).content
        digest = self.sha1_url(img_url)
        # Fix: previously every non-.jpg file (including .png/.gif images)
        # was mislabelled '.mp4'. Keep the real image extension when the
        # URL carries one.
        lower_url = img_url.lower()
        for ext in self.IMAGE_EXTS:
            if lower_url.endswith(ext):
                filename = digest + ext
                break
        else:
            filename = digest + '.mp4'
        # Binary write of the raw response body.
        with open(filename, 'wb') as f:
            f.write(img_html)

        print(filename, '下载成功')

    def crawl(self):
        """Program entry point: prompt for forum name and page range, then
        crawl each listing page in order."""
        name = input('输入贴吧名:')
        start = int(input('输入起始页:'))
        end = int(input('输入终止页:'))
        for page in range(start, end + 1):
            # Tieba paginates by record offset: page N starts at (N-1)*50.
            pn = (page - 1) * 50
            page_url = self.url.format(name, pn)
            # Run the scraping logic for this listing page.
            self.parse_html(page_url)

if __name__ == '__main__':
    # Run the interactive crawler only when executed as a script.
    TiebaSpider().crawl()














