## Simplest possible crawler example
## Baidu homepage
import os
import random
import re
import threading
import time
from queue import Empty, Queue

import requests


def BDCrawler():
    """Fetch the Baidu homepage and return the decoded HTML.

    The simplest possible crawler: a single GET request.

    Returns:
        str: The response body decoded as text (UTF-8 by default).

    Raises:
        requests.RequestException: On connection failure or timeout.
    """
    url = 'http://www.baidu.com'  # target URL
    # A timeout keeps the request from hanging indefinitely on a dead
    # connection (the original had none).
    response = requests.get(url, timeout=10)

    return response.content.decode()


# print(BDCrawler())


class DouTu(object):
    """Scrape image URLs from the Baidu Tieba "斗图" (meme) forum and download them.

    A producer thread (deal_with) visits each forum thread and pushes the
    .jpg URLs it finds into a bounded queue; a consumer thread (storage)
    pops URLs off the queue and writes the image bytes to disk.  Start
    both threads via run().
    """

    def __init__(self):
        # Forum listing endpoint; the board is selected via the "kw" param.
        self.url = "https://tieba.baidu.com/f"
        # Desktop browser UA so Tieba serves the full HTML markup.
        self.header = {"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'}
        self.params = {"kw": "斗图"}
        # Bounded so the producer blocks instead of buffering without limit.
        self.url_q = Queue(maxsize=50)

    def respons(self, url):
        """Fetch the forum listing page and return relative thread paths.

        Args:
            url: Listing URL to fetch (self.params is sent as the query string).

        Returns:
            list[str]: Relative thread permalinks such as "/p/1234567890".
        """
        # Timeout keeps a stalled connection from hanging the caller forever.
        response = requests.get(url, params=self.params,
                                headers=self.header, timeout=10)

        resp = response.content.decode()
        # Thread permalinks in the listing HTML look like /p/<digits>.
        personal_url_list = re.findall(r'/p/\d+', resp)

        return personal_url_list

    def deal_with(self):
        """Producer: visit each thread page and enqueue every .jpg URL found."""
        personal_url_list = self.respons(self.url)
        for url in personal_url_list:
            try:
                # The GET is inside the try (the original left it outside, so a
                # single network error killed the whole producer thread).
                response = requests.get('https://tieba.baidu.com' + url,
                                        headers=self.header, timeout=10)
                resp = response.content.decode()
                png_url_list = re.findall(r'src="(https://.*?\.jpg)"', resp)
                for i in png_url_list:
                    self.url_q.put(i)
                # Be polite to the server between thread fetches.
                time.sleep(0.5)
            except (requests.RequestException, UnicodeDecodeError) as e:
                # Best effort: skip a broken thread page, but report why
                # instead of swallowing every exception silently.
                print('skipping {}: {}'.format(url, e))

    def storage(self):
        """Consumer: download queued image URLs and write them to disk.

        Exits once the producer has finished and the queue has drained
        (only the main thread and this thread remain alive).
        """
        # Create the output directory up front; the original crashed on the
        # first write if it did not already exist.
        os.makedirs('./crawler_learning/tupian', exist_ok=True)
        n = 0
        while True:
            try:
                # A timed get lets us periodically re-check for producer
                # shutdown; the original's blocking get() deadlocked if the
                # producer exited while this thread was waiting on an empty
                # queue, so t2.join() never returned.
                url = self.url_q.get(timeout=1)
            except Empty:
                # Only main + this thread left -> producer is done; stop.
                if len(threading.enumerate()) <= 2:
                    return
                continue
            try:
                response = requests.get(url, timeout=10)
                resp = response.content
                # URLs matched by the producer are .jpg, so save with a .jpg
                # extension (the original mislabeled JPEG bytes as .png).
                with open('./crawler_learning/tupian/{}.jpg'.format(n), 'wb') as f:
                    f.write(resp)
                n += 1
            except requests.RequestException as e:
                print('download failed for {}: {}'.format(url, e))
            finally:
                self.url_q.task_done()

    def run(self):
        """Start the producer and consumer threads and wait for both."""
        t1 = threading.Thread(target=self.deal_with)
        t2 = threading.Thread(target=self.storage)
        t1.start()
        t2.start()

        t1.join()
        t2.join()


# Script entry point: build the crawler and run both worker threads.
if __name__ == '__main__':
    DouTu().run()
