import requests
import time

# Configuration: file paths and request parameters
INPUT_FILE = "url.txt"  # input file: one URL per line
OUTPUT_FILE = "200_urls.txt"  # output file for URLs that returned HTTP 200
TIMEOUT = 10  # per-request timeout in seconds
HEADERS = {  # request headers (browser User-Agent to avoid trivial bot blocking)
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36'
}


def check_url(url):
    """Probe *url* with a GET request and report its HTTP status.

    Args:
        url: The URL to check. If it lacks a scheme, ``http://`` is
            prepended before the request is made.

    Returns:
        A ``(status_code, success)`` tuple. ``status_code`` is the final
        HTTP status code (after following redirects), or ``None`` when no
        response was received at all (e.g. DNS failure, timeout).
        ``success`` is True only when the request completed without
        raising a ``requests`` exception.
    """
    # Prepend a scheme if the input is a bare host/path.
    if not url.startswith(("http://", "https://")):
        url = "http://" + url

    try:
        # Use the response as a context manager so the underlying
        # connection is released back to the pool even on error paths
        # (the original never closed the response).
        with requests.get(
            url,
            headers=HEADERS,
            timeout=TIMEOUT,
            allow_redirects=True,  # follow redirects to the final target
        ) as response:
            return response.status_code, True
    except requests.exceptions.RequestException as e:
        # Covers timeouts, connection errors, SSL errors, etc.
        response = getattr(e, 'response', None)
        if response is not None:
            # The server did respond (e.g. an HTTP-level failure).
            return response.status_code, False
        return None, False  # no response at all (e.g. DNS resolution failure)


# --- Main driver: read URLs, probe each, keep those that return HTTP 200 ---
success_count = 0
total_count = 0

print(f"▶ 开始检测URL状态（输入文件: {INPUT_FILE}）")
# Explicit UTF-8 so behavior does not depend on the platform's default
# locale encoding (e.g. cp936/cp1252 on Windows would corrupt non-ASCII).
with open(INPUT_FILE, 'r', encoding='utf-8') as infile, \
        open(OUTPUT_FILE, 'w', encoding='utf-8') as outfile:
    for line in infile:
        url = line.strip()
        if not url:  # skip blank lines
            continue

        total_count += 1
        status_code, success = check_url(url)

        # Live progress line; flush so the carriage-return update is
        # actually shown instead of sitting in the stdout buffer.
        print(f"⏳ 检测中: {url.ljust(50)} → 状态码: {status_code or '请求失败'}",
              end='\r', flush=True)

        # Keep only URLs that returned HTTP 200.
        if status_code == 200:
            success_count += 1
            outfile.write(url + '\n')
            print(f"✅ 有效URL: {url}")

print(f"\n✔ 检测完成！共处理 {total_count} 个URL，{success_count} 个返回200状态码")
print(f"✔ 有效URL已保存至: {OUTPUT_FILE}")