import re
import sys
from collections import deque

import requests
from bs4 import BeautifulSoup

sys.setrecursionlimit(1000000)  # e.g. set to one million here

root_url = 'http://www.jzfzb.com'  # crawl starting point; also the prefix for site-relative links
all_urls = set()   # every URL discovered so far (crawled pages plus external http links)
queue_urls = []    # NOTE(review): appears unused in this file — run() builds its own queue; confirm before removing


def get_all_url(queue_url: list):
    """Breadth-first crawl starting from the URLs in *queue_url*.

    Fetches each queued page, follows every ``<a href>`` whose href
    contains ``/``, and accumulates discovered URLs in the module-level
    ``all_urls`` set.  Site-relative links are prefixed with ``root_url``
    and enqueued for crawling; absolute ``http...`` links are recorded
    but not followed.

    Iterative rather than recursive: the original implementation used one
    stack frame per crawled page, which overflows the real C stack on any
    sizable site regardless of ``sys.setrecursionlimit``.  A ``deque``
    gives O(1) pops from the left (``list.pop(0)`` is O(n)).

    :param queue_url: seed URLs to start crawling from (not mutated).
    """
    queue = deque(queue_url)
    while queue:
        url = queue.popleft()
        if url in all_urls:
            continue
        all_urls.add(url)

        try:
            # timeout so a single dead host cannot hang the whole crawl
            html = requests.get(url, timeout=10).text
        except requests.RequestException:
            continue  # best-effort crawl: skip unreachable pages

        html_soup = BeautifulSoup(html, 'html.parser')
        for a_soup in html_soup.find_all('a', attrs={'href': re.compile('/')}):
            href = a_soup.attrs['href']
            if href.startswith('http'):
                # external absolute link: record it, do not crawl it
                all_urls.add(href)
            elif href != '/':
                full = root_url + href.strip()
                # skip URLs already visited or already queued
                if full not in all_urls and full not in queue:
                    queue.append(full)


def run():
    """Crawl the whole site starting at ``root_url`` and print the
    resulting set of discovered URLs."""
    seeds = [root_url]
    get_all_url(seeds)
    print(all_urls)


# Script entry point: crawl the site and print every URL discovered.
if __name__ == '__main__':
    run()
