import re
import os
from urllib import request


class TbImg(object):
    """Crawler for the "good threads" (精品贴) section of a Baidu Tieba forum.

    Walks every listing page, and for every thread downloads all content
    images into a directory named after the thread title.
    """

    def __init__(self):
        # Total images downloaded so far (progress output only).
        self.count = 0
        # Raw HTML of the most recently fetched page.
        self.html = ''
        # Entry point: "good threads" listing of the 图片 (pictures) forum.
        self.url = 'https://tieba.baidu.com/f?kw=%E5%9B%BE%E7%89%87&ie=utf-8&tab=good'
        self.headers = {
            'User-Agent': 'Mozilla / 5.0(WindowsNT6.1;WOW64;rv: 61.0) Gecko / 20100101Firefox / 61.0',
        }
        # Page count of the thread currently being crawled.
        self.total = 0
        # Consecutive failures of the request currently being retried.
        self.retry_count = 0
        # Title of the current thread; doubles as its download directory.
        self.title = ''
        # Base URL of the current thread.
        self.son_url = ''

    def _fetch(self, url):
        """Fetch *url* and store the decoded page in ``self.html``.

        Retries up to 3 times; after that gives up, leaving ``self.html``
        unchanged. ``self.retry_count`` is reset on both success and final
        failure so later requests start with a clean slate (the original
        never reset it after giving up, making every later request fail
        instantly).
        """
        req = request.Request(url=url, headers=self.headers)
        try:
            self.retry_count += 1
            response = request.urlopen(req)
            self.html = response.read().decode("utf-8", "ignore")
        except Exception:
            if self.retry_count > 3:
                print("请求源代码失败，地址：{}".format(url))
                self.retry_count = 0
                return
            print("请求源代码失败，尝试重新发送请求，请稍等...")
            # Bug fix: the original recursed without the required ``url``
            # argument, raising TypeError on every retry.
            self._fetch(url)
        else:
            self.retry_count = 0

    def get_nice_html(self, url):
        """Fetch the source of a good-threads listing page."""
        self._fetch(url)

    def get_son_html(self, url):
        """Fetch the source of one page of a thread."""
        self._fetch(url)

    def get_next_page(self):
        """Locate the next listing page link and store it in ``self.url``.

        Returns True when a next page exists, False otherwise.
        """
        pattern = re.compile(r'<div class="thread_list_bottom clearfix">.*?<span class="pagination-current pagination-item ">.*?<a href="(.*?)"', re.S)
        res = re.search(pattern, self.html)
        if res:
            # Bug fix: the original did int(res.group(1)) which always raised
            # ValueError on an href. Tieba pagination hrefs are
            # protocol-relative ("//tieba.baidu.com/f?...&pn=50"), so
            # completing them only needs the scheme — TODO confirm against a
            # live listing page.
            self.url = 'https:' + res.group(1)
            print("下一页链接已拼接完成" + self.url)
            return True
        return False

    def parse_nice_html(self):
        """Extract every thread (href, title) on the current listing page
        and download all images of each thread, page by page."""
        pattern = re.compile(r'threadlist_title.*?<a.*?rel="noreferrer".*?href="(.*?)".*?title="(.*?)".*?</a>.*?</div>', re.S)
        for href, title in re.findall(pattern, self.html):
            self.title = title
            self.create_directory()
            self.son_url = 'https://tieba.baidu.com' + href
            # First fetch is only used to read the thread's page count.
            self.get_son_html(self.son_url)
            self.get_son_total()
            # Bug fix: the original reused loop variable ``x`` for both the
            # thread tuple and the page number, shadowing the former.
            for page in range(1, self.total + 1):
                self.get_son_html(self.son_url + "?pn={}".format(page))
                self.parse_son_html()

    def get_son_total(self):
        """Parse the current thread's page count into ``self.total``.

        Robustness fix: single-page threads may lack the ``max-page``
        attribute; the original crashed with AttributeError there. Default
        to 1 page in that case.
        """
        pattern = re.compile(r'max-page="(.*?)"', re.S)
        res = re.search(pattern, self.html)
        self.total = int(res.group(1)) if res else 1

    def parse_son_html(self):
        """Download every content image (<img class="BDE_Image">) found on
        the current thread page into the thread's directory."""
        pattern = re.compile(r'<img class="BDE_Image.*?src="(.*?)"', re.S)
        for src in re.findall(pattern, self.html):
            self.count += 1
            img_name = src.split('/')[-1]
            path = self.title + '/' + img_name
            request.urlretrieve(src, path)
            print('第%s张图片下载完成' % self.count)

    def create_directory(self):
        """Create the per-thread download directory if it does not exist.

        NOTE(review): thread titles may contain characters that are invalid
        in file names on some platforms — consider sanitizing self.title.
        """
        if not os.path.exists(self.title):
            os.mkdir(self.title)

    def run(self):
        """Crawl listing pages until no next-page link is found."""
        while True:
            self.get_nice_html(self.url)
            self.parse_nice_html()
            # Bug fix: the original called get_next_page() twice per loop.
            if not self.get_next_page():
                # Message fix: nothing is stored in a database; images are
                # saved to disk.
                print('数据爬取完毕！')
                break


if __name__ == "__main__":
    s = TbImg()
    s.run()













