import argparse
import concurrent.futures
import requests
import os
import forum_scanner
import utils
from urllib.parse import urlparse
from tqdm import tqdm


# Configuration parameters for the scanner.
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'  # browser-like UA to avoid trivial bot blocks
TIMEOUT = 8   # per-request timeout in seconds (connect + read)
THREADS = 15  # worker count for the ThreadPoolExecutor

def sanitize_filename(url):
    """Build a filesystem-safe filename for *url*.

    The scheme and query string are ignored; dots, slashes and the
    port separator are replaced with underscores so the result is a
    valid filename on Windows as well as POSIX systems.

    Returns the name as '<domain>[<path>].html'.
    """
    parsed = urlparse(url)
    # ':' appears in netloc when the URL carries an explicit port and is
    # illegal in Windows filenames, so replace it along with the dots.
    domain = parsed.netloc.replace('.', '_').replace(':', '_')
    path = parsed.path.replace('/', '_') if parsed.path else ''
    return f"{domain}{path}.html"


def save_html(content, url, output_dir):
    """Write the raw page bytes for *url* into *output_dir*.

    The filename is derived from the URL via sanitize_filename().
    Returns True on success, False (after logging) on a filesystem error.
    """
    filename = sanitize_filename(url)
    filepath = os.path.join(output_dir, filename)

    try:
        with open(filepath, 'wb') as f:
            f.write(content)
        return True
    except OSError as e:
        # Only filesystem failures are expected here; catching OSError
        # (instead of bare Exception) keeps genuine bugs visible while
        # preserving the best-effort log-and-continue contract.
        print(f"保存失败 {url}: {str(e)}")
        return False


def check_url(url, output_dir):
    """Probe *url* and save its HTML when it looks like a target forum page.

    A page is saved only when the response status is 2xx AND the decoded
    body contains the keyword '論壇'.

    Returns a (url, success, status) tuple, where *status* is the HTTP
    status code, or the stringified exception on a network failure.
    """
    try:
        # The 'with' block guarantees the streamed connection is released
        # back to the pool even when the keyword check fails — without it,
        # stream=True responses leak connections until GC.
        with requests.get(
            url,
            headers={'User-Agent': USER_AGENT},
            timeout=TIMEOUT,
            allow_redirects=True,
            verify=False,  # NOTE(review): TLS verification deliberately off for mass scanning
            stream=True
        ) as response:
            # Only 2xx status codes are treated as reachable.
            if 200 <= response.status_code < 300:
                # Auto-detect encoding: fall back to lossy UTF-8 when the
                # server sent no charset information.
                if response.encoding is None:
                    text = response.content.decode('utf-8', errors='ignore')
                else:
                    text = response.text
                # Keep only pages containing the forum keyword.
                if '論壇' in text:
                    success = save_html(response.content, url, output_dir)
                    return (url, success, response.status_code)
            return (url, False, response.status_code)
    except Exception as e:
        # Broad catch is intentional: one bad URL must not kill the worker;
        # the error string is surfaced in the result tuple instead.
        return (url, False, str(e))


def main():
    """CLI entry point: build candidate URLs, probe them concurrently, save hits."""
    arg_parser = argparse.ArgumentParser(description='网页可用性探测器')
    arg_parser.add_argument('-m', '--middles', required=True,
                            help='中间部分文件路径')
    arg_parser.add_argument('-t', '--tlds', default='tlds.txt',
                            help='顶级域名文件路径')
    arg_parser.add_argument('-o', '--output', default='valid_urls.txt',
                            help='URL列表输出文件')
    arg_parser.add_argument('-d', '--html-dir', default='html_output',
                            help='HTML保存目录')
    arg_parser.add_argument('-p', '--protocol', choices=['http', 'https'], nargs='+',
                            default=['http', 'https'], help='检测协议')
    args = arg_parser.parse_args()

    # Make sure the HTML output directory exists before any worker writes.
    os.makedirs(args.html_dir, exist_ok=True)

    # Load the URL building blocks from disk.
    middles = utils.load_list(args.middles)
    tlds = utils.load_list(args.tlds)

    # Cartesian expansion: every requested protocol over every middle/TLD pair.
    all_urls = [
        candidate
        for proto in args.protocol
        for candidate in utils.generate_urls(middles, tlds, proto)
    ]

    print(f"待检测URL总数: {len(all_urls):,}")

    valid_urls = []

    with tqdm(total=len(all_urls), desc="检测进度", unit="url") as progress:
        with concurrent.futures.ThreadPoolExecutor(max_workers=THREADS) as pool:
            pending = [pool.submit(check_url, candidate, args.html_dir)
                       for candidate in all_urls]

            # Consume results as workers finish, keeping the progress bar live.
            for done in concurrent.futures.as_completed(pending):
                url, is_valid, status = done.result()
                if is_valid:
                    valid_urls.append(url)
                    tqdm.write(f"[+] 有效 {url} (状态码: {status})")
                progress.update(1)

    # Persist the list of hits, one URL per line.
    with open(args.output, 'w', encoding='utf-8') as f:
        f.write('\n'.join(valid_urls))

    print(f"\n有效URL数量: {len(valid_urls)}")
    print(f"HTML文件保存至: {os.path.abspath(args.html_dir)}")
    print(f"URL列表保存至: {args.output}")


# Run the scanner only when executed as a script, not on import.
if __name__ == '__main__':
    main()