from urllib.parse import urldefrag, urljoin

import requests
from bs4 import BeautifulSoup

def get_all_links(url, base_url, depth, max_depth=5, visited=None):
    """Recursively collect in-site links reachable from *url*.

    Args:
        url: Page to fetch on this step of the crawl.
        base_url: Site root; only links starting with this prefix are kept.
        depth: Current recursion depth (the initial call passes 1).
        max_depth: Depth at which recursion stops.
        visited: Set of already-fetched URLs, shared across the recursion.
            Created on the first call; callers normally omit it.

    Returns:
        List of discovered in-site URLs (may be empty on error/limit).
    """
    if visited is None:
        visited = set()

    if depth > max_depth or url in visited:
        return []

    visited.add(url)
    print(f"Visiting: {url} (Depth: {depth})")

    # Keep the try body minimal: only the network call can raise
    # RequestException; parsing failures should not be swallowed here.
    try:
        response = requests.get(url, timeout=10)
    except requests.exceptions.RequestException as e:
        print(f"An error occurred while trying to retrieve links from {url}: {e}")
        return []

    if response.status_code != 200:
        print(f"Failed to retrieve links from {url}. Status code: {response.status_code}")
        return []

    soup = BeautifulSoup(response.text, 'html.parser')
    hrefs = []
    seen_here = set()  # dedupe within this page (visited only grows on fetch)

    for link in soup.find_all('a', href=True):
        # Resolve relative hrefs against the site root and drop any
        # #fragment so "page.html" and "page.html#sec" count as one page.
        full_url, _ = urldefrag(urljoin(base_url, link['href']))
        if not full_url.startswith(base_url):
            continue  # stay within the target site
        if full_url in visited or full_url in seen_here:
            continue
        seen_here.add(full_url)
        hrefs.append(full_url)
        if depth < max_depth:
            hrefs.extend(get_all_links(full_url, base_url, depth + 1, max_depth, visited))

    return hrefs

def scan_website(url, max_depth=5, output_file='eyou-links.txt'):
    """Crawl *url* and write every discovered in-site link to *output_file*.

    Args:
        url: Root URL of the site to scan.
        max_depth: Maximum crawl depth passed to get_all_links.
        output_file: Path of the text file to write, one link per line.
            Defaults to the original hard-coded 'eyou-links.txt'.
    """
    # Normalize to a trailing slash so the startswith() prefix checks in
    # get_all_links behave consistently for the root URL.
    base_url = url.rstrip('/') + '/'
    all_links = get_all_links(base_url, base_url, 1, max_depth)

    # Explicit encoding avoids platform-dependent defaults; writelines
    # batches the output instead of one tiny write per link.
    with open(output_file, 'w', encoding='utf-8') as file:
        file.writelines(link + '\n' for link in all_links)

    print(f"Found {len(all_links)} links.")
    print('success')

# Entry-point guard: importing this module must not trigger a live crawl.
if __name__ == "__main__":
    url = 'http://www.jiangshuntech.com/'
    scan_website(url, max_depth=10)  # Increase max_depth to explore more pages
