import requests
import lxml.etree
from queue import Queue
import time


class BFSSpider():
    """Breadth-first crawler for Baidu Baike article pages.

    Starting from a seed article URL, fetches each page, extracts the
    in-article ``/item/`` links, and crawls them level by level.
    """

    def __init__(self):
        # Browser-like User-Agent so Baidu Baike serves the normal page.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4015.0 Safari/537.36"
        }

    def get_content(self, url):
        """Fetch ``url`` and return the parsed lxml HTML element tree.

        Raises requests.RequestException on network failure/timeout.
        """
        response = requests.get(
            url=url,
            headers=self.headers,
            timeout=10,  # fix: without a timeout a stalled server hangs the crawl forever
        )
        # Use the detected encoding to avoid mojibake in the decoded text.
        response.encoding = response.apparent_encoding
        return lxml.etree.HTML(response.text)

    def geteveryurl(self, html):
        """Return absolute Baike ``/item/`` links found in the article body.

        Only relative links are kept (absolute ones would be doubled by the
        prefix join), preserving the original filter exactly.
        """
        return [
            'https://baike.baidu.com' + url
            for url in html.xpath('//div[@class="content-wrapper"]//a/@href')
            if '/item/' in url and 'https://baike.baidu.com' not in url
        ]

    def BFS_url(self, url, max_pages=None):
        """Breadth-first crawl starting from ``url``.

        max_pages: optional cap on the number of pages fetched; ``None``
        (the default) keeps the original unbounded behavior.
        """
        queue = Queue()
        queue.put(url)
        # Fix: track visited URLs — the original re-enqueued already-seen
        # pages and revisited them endlessly.
        visited = {url}
        fetched = 0
        while not queue.empty():
            if max_pages is not None and fetched >= max_pages:
                break
            current = queue.get()
            try:
                html = self.get_content(current)
            except requests.RequestException as exc:
                # Robustness: one bad page should not kill the whole crawl.
                print("抓取失败", current, exc)
                continue
            print("抓取", current)
            fetched += 1
            time.sleep(1.5)  # politeness delay between requests
            for link in self.geteveryurl(html):
                if link not in visited:
                    visited.add(link)
                    queue.put(link)


if __name__ == "__main__":
    # Seed article: 迈克尔·法斯宾德 (Michael Fassbender) on Baidu Baike.
    start_url = (
        'https://baike.baidu.com/item/%E8%BF%88%E5%85%8B%E5%B0%94%C2%B7'
        '%E6%B3%95%E6%96%AF%E5%AE%BE%E5%BE%B7/1152286'
    )
    spider = BFSSpider()
    spider.BFS_url(start_url)