from urllib.parse import urljoin, urlparse

import requests
from bs4 import BeautifulSoup


def find_links(url):
    """Fetch *url* and return a de-duplicated list of absolute http(s)
    links found in its ``<a href="...">`` anchors.

    Every href — absolute, root-relative, or page-relative — is resolved
    against *url* via ``urljoin``; fragment-only, ``mailto:``,
    ``javascript:`` and other non-http(s) targets are skipped.

    Raises:
        requests.RequestException: if the page cannot be fetched, or
            (via ``raise_for_status``) if the server returns an HTTP
            error — better than silently scraping a 404 error page.
    """
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()
    soup = BeautifulSoup(resp.text, 'html.parser')
    links = set()
    for a in soup.find_all('a', href=True):
        # Resolve relative paths against the base URL.  The original code
        # handled only hrefs starting with 'http' or '/', dropping plain
        # page-relative links such as 'page.html' or '../other/'.
        absolute = urljoin(url, a['href'])
        if urlparse(absolute).scheme in ('http', 'https'):
            links.add(absolute)
    return list(links)


def check_links(links):
    """Probe each URL in *links* and return ``(link, reason)`` pairs for
    the ones that appear broken.

    ``reason`` is the final HTTP status code (an int) for responses of
    400 or above, or the stringified exception when the request itself
    failed (timeout, DNS error, connection refused, ...).

    Returns an empty list when *links* is empty or all links are healthy.
    """
    bad_links = []
    for link in links:
        try:
            r = requests.head(link, allow_redirects=True, timeout=5)
            # Many servers reject HEAD (405/501) but serve GET fine —
            # retry with a streamed GET so the body is never downloaded,
            # avoiding false positives.
            if r.status_code in (405, 501):
                r = requests.get(link, allow_redirects=True,
                                 timeout=5, stream=True)
                r.close()
            # Any 2xx/3xx result (after redirects) counts as healthy;
            # the original flagged every non-200, e.g. a 204 No Content.
            if r.status_code >= 400:
                bad_links.append((link, r.status_code))
        except requests.RequestException as e:
            # Narrowed from bare Exception: only network/HTTP failures
            # belong here; programming errors should still propagate.
            bad_links.append((link, str(e)))
    return bad_links


def main(url):
    """Crawl *url* for anchor links and return the list of broken ones
    as ``(link, reason)`` pairs (empty when everything is healthy)."""
    return check_links(find_links(url))

if __name__ == '__main__':
    # Interactive entry point: prompt for a URL, scan it, report results.
    target = input('请输入要检测的网页URL: ')
    problems = main(target)
    if not problems:
        print('所有链接均正常。')
    else:
        print('检测到异常链接:')
        for bad_url, detail in problems:
            print(f'{bad_url} -> {detail}')