import re

import requests


# Default HTTP headers sent with every request.  A browser-like
# User-Agent reduces the chance of being served a bot-blocking page.
# Fixed: the value previously began with a stray "User-Agent, " prefix
# and used "rv,2.0.1" instead of "rv:2.0.1", making the UA malformed.
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
}

def get_html(url, timeout=10):
    """Fetch *url* and return the response body as text.

    Args:
        url: Absolute URL to fetch.
        timeout: Seconds to wait for the server before giving up.
            New keyword with a default, so existing callers are unaffected.

    Returns:
        The decoded response body, or "" when the request fails for any
        network-level reason (connection error, timeout, invalid URL, ...).
    """
    try:
        # A timeout is essential for a crawler: without one, a single
        # unresponsive host can hang the whole recursive crawl forever.
        res = requests.get(url, headers=headers, timeout=timeout)
        return res.text
    except requests.RequestException:
        # Narrowed from a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit.  Best-effort: a failed page
        # simply contributes no links to the crawl.
        return ""
#获取子url列表

def get_son_url(url):
    #获取网页内容
    html = get_html(url)
    href_re = '<a .*?href="(.*?)".*?>'
    href_list = re.findall(href_re,html,re.S)
    # print(href_list)
    return href_list
def deep_crawer(url):
    """Recursively crawl *url*, following absolute links up to 3 levels deep.

    Depth bookkeeping lives in the module-level ``deep_dict`` mapping
    (url -> level, root = 1), which also doubles as the visited set so
    each URL is fetched at most once.  Prints a banner per visited page.
    """
    # Treat an unseen url as a new level-1 root instead of raising
    # KeyError (the original required callers to pre-seed deep_dict).
    depth = deep_dict.setdefault(url, 1)
    if depth > 3:
        return
    print("*" * 100)
    print("\t" * depth, "当前层级:%d" % depth)
    print("*" * 100)
    for son_url in get_son_url(url):
        # Only absolute links are followed; relative paths, fragments
        # and javascript: pseudo-links are skipped.
        if son_url.startswith('http'):
            if son_url not in deep_dict:
                deep_dict[son_url] = depth + 1
                deep_crawer(son_url)


if __name__ == "__main__":
    url = "https://www.baidu.com/s?wd=岛国邮箱"
    deep_dict = {} #层级控制   深度爬取
    #key存放 父url 子url  子子url   value值 就是层级  1 2 3
    deep_dict[url] = 1  #默认第一级  父url
    deep_crawer(url)