# Brute-force enumerator for the school website's article pages
import requests
import multiprocessing
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
import time

# Base URL of the target site and the file that collects valid page URLs.
base_url = "https://cise.njtech.edu.cn/info/"
output_file = "valid_pages.txt"

# Cap the worker-thread count at 32 or the CPU core count, whichever is smaller.
# NOTE(review): this scan is I/O-bound, so more threads than cores would
# likely help throughput — the CPU-count cap looks conservative; confirm intent.
max_threads = min(32, multiprocessing.cpu_count())  # 限制线程数量，避免过多线程占用资源

# Lock serializing appends to the output file across worker threads.
# NOTE(review): multiprocessing.Lock works across threads too, but
# threading.Lock would be the conventional, lighter choice here — confirm
# whether process-level locking was intended.
write_lock = multiprocessing.Lock()

def check_url(url):
    """Probe *url* and append it to the output file if it returns HTTP 200.

    Args:
        url: Absolute URL to test.

    Returns:
        True only when the request completed with status 200; False on any
        non-200 status or on a request/timeout error.

    Bug fixed: the original returned True for ANY completed request
    (404, 500, ...), even though only 200 responses were recorded —
    callers treating the return value as "URL is valid" were misled.
    """
    try:
        response = requests.get(url, timeout=5)  # 5 s timeout keeps the scan moving
        if response.status_code == 200:
            # Serialize appends so concurrent threads never interleave lines.
            with write_lock:
                with open(output_file, "a", encoding="utf-8") as file:
                    file.write(f"{url}\n")
            return True
        return False
    except requests.exceptions.RequestException:
        # Covers timeouts, DNS failures, connection resets, etc.
        return False

def process_urls_in_batch(url_batch, executor):
    """Fan each URL in *url_batch* out to *executor* and wait for all to finish."""
    pending = [executor.submit(check_url, candidate) for candidate in url_batch]
    for finished in as_completed(pending):
        # Propagate any unexpected worker exception instead of dropping it.
        finished.result()

def main():
    """Scan every URL combination in batches and record the ones that respond 200."""
    # Start from a clean slate so a rerun does not append to stale results.
    if os.path.exists(output_file):
        os.remove(output_file)

    # Candidate ID ranges for the two numeric path components (tune as needed).
    first_ids = range(1086, 10000)
    second_ids = range(1000, 10000)

    def candidate_urls():
        """Lazily yield each URL so the full cross product never sits in memory."""
        for a in first_ids:
            for b in second_ids:
                yield f"{base_url}{a}/{b}.htm"

    chunk_size = 1024  # cap in-flight URLs per round to bound memory use
    grand_total = len(first_ids) * len(second_ids)

    with ThreadPoolExecutor(max_workers=max_threads) as pool:
        # One progress bar tracks the whole cross product.
        with tqdm(total=grand_total, desc="Scanning URLs") as progress:
            chunk = []
            for candidate in candidate_urls():
                chunk.append(candidate)
                if len(chunk) == chunk_size:
                    process_urls_in_batch(chunk, pool)
                    progress.update(len(chunk))
                    chunk = []

            # Flush the final partial chunk, if any.
            if chunk:
                process_urls_in_batch(chunk, pool)
                progress.update(len(chunk))

if __name__ == "__main__":
    start_time = time.time()  # record wall-clock start
    main()
    end_time = time.time()  # record wall-clock end
    # Report total elapsed scan time in seconds.
    print(f"扫描完成，耗时：{end_time - start_time:.2f}秒")
