import re
from collections import deque

import requests


# HTTP request headers sent with every crawl request.
# FIX: the original value embedded a duplicated "User-Agent, " prefix inside
# the header *value* and used commas instead of a colon in the Firefox
# revision token ("rv,2.0.1") — i.e. the UA string was malformed. Restored
# the canonical Firefox 4 UA format.
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1"
}

def get_html(url):
    """Fetch *url* and return the response body as text.

    Returns an empty string on any network failure so the caller can
    treat an unreachable page as simply having no links, instead of
    aborting the whole crawl.
    """
    try:
        # timeout keeps one dead server from hanging the crawl forever;
        # the original had no timeout at all.
        res = requests.get(url, headers=headers, timeout=10)
        return res.text
    except requests.RequestException:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and hid real bugs.
        return ""
# Collect the child URLs linked from a page.
def get_son_url(url):
    """Return the list of href values of every <a> tag on the page at *url*."""
    page = get_html(url)
    # re.S lets `.` span newlines, so anchors broken across lines still match.
    anchor_pattern = re.compile('<a .*?href="(.*?)".*?>', re.S)
    return anchor_pattern.findall(page)
def width_crawer(start_url, max_depth=3):
    """Breadth-first crawl starting from *start_url*.

    Relies on the module-global ``deep_dict``, which maps every discovered
    URL to its depth (1 = the start page). Pages at a depth greater than
    *max_depth* are printed but their links are not followed.

    Args:
        start_url: URL that seeds the crawl.
        max_depth: deepest level whose links are still expanded
            (default 3, matching the original hard-coded limit).
    """
    # Guard: seed the depth table so a missing entry can't raise KeyError.
    deep_dict.setdefault(start_url, 1)
    # deque gives O(1) popleft; the original list.pop(0) was O(n) per dequeue.
    url_queue = deque([start_url])
    while url_queue:
        url = url_queue.popleft()
        depth = deep_dict[url]  # hoisted: used three times below
        print("\t" * depth, "当前层级:%d" % depth)
        if depth <= max_depth:
            for son_url in get_son_url(url):
                # Keep only absolute http(s) links; skip URLs already seen
                # so each page is enqueued (and fetched) at most once.
                if son_url.startswith('http') and son_url not in deep_dict:
                    deep_dict[son_url] = depth + 1
                    url_queue.append(son_url)


if __name__ == "__main__":
    url = "https://www.baidu.com/s?wd=岛国邮箱"
    deep_dict = {} #层级控制   深度爬取
    #key存放 父url 子url  子子url   value值 就是层级  1 2 3
    deep_dict[url] = 1  #默认第一级  父url
    width_crawer(url)