import requests
import time
import random
import logging
import argparse
from urllib.parse import urlparse, urlunparse
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor, as_completed

# --- Logging configuration: timestamp, level, message ---
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# --- Default configuration ---
# Browser-like request headers; applied to the shared requests.Session in
# ProxyScraper.__init__ so every request carries them.
DEFAULT_HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
    "Connection": "keep-alive",
    "Referer": "https://www.google.com/",
}


class ProxyScraper:
    """Check a list of URLs for liveness and scrape SOCKS5 proxies from them.

    Phase one probes every URL from ``input_file`` concurrently (HTTPS first,
    with a plain-HTTP fallback) and writes the reachable ones to
    ``valid_url_file``.  Phase two queries each live site's
    ``/proxies_status`` endpoint and writes the deduplicated, sorted
    ``socks5://ip:port`` list to ``proxy_file``.
    """

    def __init__(self, input_file, valid_url_file, proxy_file, timeout, workers):
        """Store configuration and prepare a shared HTTP session.

        Args:
            input_file: File holding one URL per line.
            valid_url_file: File where reachable URLs are written.
            proxy_file: File where scraped SOCKS5 proxies are written.
            timeout: Per-request timeout in seconds.
            workers: Number of concurrent worker threads.
        """
        self.input_file = Path(input_file)
        self.valid_url_file = Path(valid_url_file)
        self.proxy_file = Path(proxy_file)
        self.timeout = timeout
        self.workers = workers
        # One session shared by all workers: reuses connections and sends
        # the browser-like default headers with every request.
        self.session = requests.Session()
        self.session.headers.update(DEFAULT_HEADERS)

    def _check_url(self, url):
        """Check whether a single URL is reachable.

        Scheme-less entries are tried over HTTPS first; on a request failure
        a single plain-HTTP retry is attempted.

        Returns:
            ("valid", final_url) on HTTP 200 (after redirects), otherwise
            ("invalid", None) for a non-200 response or ("error", None) when
            the request itself failed.  Always returns a 2-tuple.
        """
        full_url = url
        if not url.startswith(("http://", "https://")):
            full_url = "https://" + url  # prefer HTTPS for bare host names

        try:
            response = self.session.get(full_url, timeout=self.timeout, allow_redirects=True)

            if response.status_code == 200:
                logging.info(f"✅ 有效URL: {response.url} (源: {url})")
                return "valid", response.url

            logging.warning(f"⚠️ 无效URL: {full_url} (状态码: {response.status_code})")
            return "invalid", None

        except requests.exceptions.RequestException as e:
            # HTTPS failed — retry once over plain HTTP.  Use startswith()
            # (not a substring test) so an "https://" embedded in a path or
            # query cannot trigger the fallback, and rewrite only the scheme
            # prefix instead of str.replace(), which substitutes every
            # occurrence in the URL.
            if full_url.startswith("https://"):
                try:
                    http_url = "http://" + full_url[len("https://"):]
                    response = self.session.get(http_url, timeout=self.timeout, allow_redirects=True)
                    if response.status_code == 200:
                        logging.info(f"✅ 有效URL (HTTP): {response.url} (源: {url})")
                        return "valid", response.url
                except requests.exceptions.RequestException:
                    pass  # HTTP failed too; fall through to the error result

            logging.error(f"❌ 请求失败: {url} | 错误: {type(e).__name__}")
            return "error", None

    def _get_proxies_status_url(self, url):
        """Derive the proxy-status endpoint URL from a site URL.

        The first '/web/' path segment is replaced by '/proxies_status';
        any other path gets '/proxies_status' appended.

        NOTE(review): replacing '/web/' (trailing slash) with
        '/proxies_status' (no trailing slash) fuses any following path
        component, e.g. '/web/foo' -> '/proxies_statusfoo'.  Kept as-is
        because the target sites' endpoint layout cannot be confirmed here.
        """
        parsed_url = urlparse(url)
        if '/web/' in parsed_url.path:
            new_path = parsed_url.path.replace('/web/', '/proxies_status', 1)
        else:
            new_path = parsed_url.path.rstrip('/') + '/proxies_status'
        return urlunparse(parsed_url._replace(path=new_path))

    def _fetch_proxies(self, url):
        """Fetch the SOCKS5 proxy list from a single site.

        Returns:
            A list of 'socks5://ip:port' strings; empty on any failure.
        """
        proxy_url = self._get_proxies_status_url(url)
        try:
            # Small random delay so concurrent workers don't hammer hosts.
            time.sleep(random.uniform(0.2, 0.8))

            response = self.session.get(proxy_url, timeout=self.timeout)
            response.raise_for_status()  # raise HTTPError for non-2xx codes

            json_data = response.json()

            socks5_proxies = [
                f"socks5://{proxy['ip']}:{proxy['port']}"
                for proxy in json_data.get('proxies', [])
                if proxy.get('protocol', '').lower() == 'socks5' and 'ip' in proxy and 'port' in proxy
            ]

            if socks5_proxies:
                logging.info(f"👍 从 {url} 发现 {len(socks5_proxies)} 个SOCKS5代理")
            return socks5_proxies

        except requests.exceptions.HTTPError as e:
            logging.warning(f"⚠️ 代理状态页不可用: {proxy_url} (状态码: {e.response.status_code})")
        except requests.exceptions.JSONDecodeError:
            # Must come BEFORE RequestException: requests' JSONDecodeError
            # subclasses RequestException, so the previous ordering made
            # this handler unreachable and misreported bad JSON as a
            # generic request failure.
            logging.error(f"❌ 无法解析JSON响应: {proxy_url}")
        except requests.exceptions.RequestException as e:
            logging.error(f"❌ 获取代理时请求失败 [{proxy_url}]: {type(e).__name__}")
        return []

    def run(self):
        """Run both phases: URL liveness checks, then proxy scraping."""
        # --- Phase one: check URLs concurrently ---
        logging.info(f"▶ 开始并发检测URL状态 (输入: {self.input_file}, 并发数: {self.workers})")

        if not self.input_file.exists():
            logging.critical(f"输入文件 {self.input_file} 不存在！")
            return

        with self.input_file.open('r', encoding='utf-8') as f:
            urls_to_check = [line.strip() for line in f if line.strip()]

        valid_urls = []
        with ThreadPoolExecutor(max_workers=self.workers) as executor:
            future_to_url = {executor.submit(self._check_url, url): url for url in urls_to_check}
            for future in as_completed(future_to_url):
                # _check_url always returns a 2-tuple, never None.
                status, result_url = future.result()
                if status == "valid":
                    valid_urls.append(result_url)

        if not valid_urls:
            logging.warning("⚠️ 没有发现任何有效网站，程序退出。")
            return

        with self.valid_url_file.open('w', encoding='utf-8') as f:
            for url in valid_urls:
                f.write(url + '\n')
        logging.info(f"✔ 检测完成！发现 {len(valid_urls)} 个有效网站，已保存至 {self.valid_url_file}")

        # --- Phase two: scrape proxies concurrently ---
        logging.info(f"▶ 开始从 {len(valid_urls)} 个有效网站并发爬取SOCKS5代理 (并发数: {self.workers})")
        all_proxies = set()  # set deduplicates across sites automatically

        with ThreadPoolExecutor(max_workers=self.workers) as executor:
            future_to_proxies = {executor.submit(self._fetch_proxies, url): url for url in valid_urls}
            for future in as_completed(future_to_proxies):
                proxies = future.result()
                if proxies:
                    all_proxies.update(proxies)

        if not all_proxies:
            logging.warning("⚠️ 未找到任何SOCKS5代理。")
            return

        with self.proxy_file.open('w', encoding='utf-8') as f:
            for proxy in sorted(all_proxies):  # deterministic output order
                f.write(proxy + '\n')
        logging.info(f"✔ 爬取完成！共发现 {len(all_proxies)} 个唯一SOCKS5代理，已保存至 {self.proxy_file}")


def main():
    """Entry point: parse command-line options and run the scraper."""
    parser = argparse.ArgumentParser(description="一个高效的SOCKS5代理爬取工具。")

    # Table-driven option registration: (flags, add_argument kwargs).
    cli_options = (
        (('-i', '--input'), dict(default="url.txt", help="包含URL列表的输入文件名。")),
        (('-o', '--output'), dict(default="200_urls.txt", help="保存有效URL的输出文件名。")),
        (('-p', '--proxy-file'), dict(default="代理.txt", help="保存SOCKS5代理的输出文件名。")),
        (('-t', '--timeout'), dict(type=int, default=10, help="网络请求超时时间（秒）。")),
        (('-w', '--workers'), dict(type=int, default=20, help="并发执行的线程数。")),
    )
    for flags, options in cli_options:
        parser.add_argument(*flags, **options)

    args = parser.parse_args()

    ProxyScraper(
        input_file=args.input,
        valid_url_file=args.output,
        proxy_file=args.proxy_file,
        timeout=args.timeout,
        workers=args.workers,
    ).run()


# --- Script entry point ---
if __name__ == "__main__":
    main()