import os
import requests
import sys
import time
from urllib.parse import urljoin
from concurrent.futures import ThreadPoolExecutor, as_completed

# Configuration
ADMIN_DIR = 'adminDict'  # directory containing *.txt wordlist files
STATE_FILE = '.scan_state'  # paths already tried, one per line (enables resume)
RESULT_FILE = 'results.txt'  # discovered URLs are appended here
MAX_THREADS = 20  # number of concurrent worker threads
TIMEOUT = 5  # per-request timeout in seconds
HEADERS = {
    'User-Agent': 'AdminScanner/1.0'
}

def load_all_paths(directory=None):
    """Load candidate paths from every ``*.txt`` file in a wordlist directory.

    Lines are stripped of surrounding whitespace; blank lines and lines
    starting with ``#`` (comments) are skipped. Duplicates across files
    collapse into one entry.

    Args:
        directory: Wordlist directory to read. Defaults to the module-level
            ``ADMIN_DIR``.

    Returns:
        set[str]: Unique candidate paths; empty set if the directory is
        missing or not a directory.
    """
    if directory is None:
        directory = ADMIN_DIR

    all_paths = set()
    # isdir (not exists) so a stray *file* with this name doesn't crash listdir.
    if not os.path.isdir(directory):
        print(f"[!] Admin dictionary directory '{directory}' does not exist.")
        return all_paths

    for filename in os.listdir(directory):
        if not filename.endswith('.txt'):
            continue
        file_path = os.path.join(directory, filename)
        try:
            # errors='ignore' tolerates wordlists with mixed/unknown encodings.
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                for line in f:
                    path = line.strip()
                    if path and not path.startswith('#'):
                        all_paths.add(path)
        except OSError as e:
            # Narrow to filesystem errors; programming errors should surface.
            print(f"[!] Error reading file {file_path}: {e}")
    return all_paths

def read_state(state_file=None):
    """Return the set of paths already scanned in previous runs.

    Args:
        state_file: Path of the state file. Defaults to the module-level
            ``STATE_FILE``.

    Returns:
        set[str]: Previously scanned paths (whitespace-stripped); empty set
        when no state file exists yet.
    """
    if state_file is None:
        state_file = STATE_FILE
    if not os.path.exists(state_file):
        return set()
    with open(state_file, 'r', encoding='utf-8') as f:
        return {line.strip() for line in f}

def write_state_batch(paths, state_file=None):
    """Append *paths* to the scan-state file, one per line.

    Args:
        paths: Iterable of path strings that have been scanned.
        state_file: File to append to. Defaults to the module-level
            ``STATE_FILE``.
    """
    if state_file is None:
        state_file = STATE_FILE
    # Append mode so state accumulates across batches/runs.
    with open(state_file, 'a', encoding='utf-8') as f:
        f.writelines(path + '\n' for path in paths)

def save_result_batch(urls, result_file=None):
    """Append discovered URLs to the results file, one per line.

    Args:
        urls: Iterable of URL strings that returned HTTP 200.
        result_file: File to append to. Defaults to the module-level
            ``RESULT_FILE``.
    """
    if result_file is None:
        result_file = RESULT_FILE
    # Append mode so earlier findings are never overwritten.
    with open(result_file, 'a', encoding='utf-8') as f:
        f.writelines(url + '\n' for url in urls)

def check_single_url(url, path, scanned, results_lock):
    """Probe one candidate path on the target and report the outcome.

    Args:
        url: Base URL of the target (e.g. ``http://example.com``).
        path: Candidate path to join onto *url* via ``urljoin``.
        scanned: Unused; kept for call-site compatibility.
        results_lock: Unused; kept for call-site compatibility.

    Returns:
        str | None: The full URL when the server answers HTTP 200 (after
        following redirects), otherwise ``None``.
    """
    full_url = urljoin(url, path)
    try:
        response = requests.get(full_url, timeout=TIMEOUT, headers=HEADERS,
                                allow_redirects=True)
        if response.status_code == 200:
            print(f"[+] Found: {full_url}")
            return full_url
        print(f"[-] Not found: {full_url} (Status: {response.status_code})")
    except requests.RequestException as e:
        # Narrowed from bare Exception: only network/HTTP errors are expected
        # here; programming errors should propagate instead of being hidden.
        print(f"[!] Error accessing {full_url}: {e}")
    return None

def check_admin(url, paths, scanned):
    """Scan every path in *paths* against *url* with a thread pool.

    Hits (HTTP 200) are appended to the results file; every attempted path
    is appended to the state file regardless of outcome.

    Returns:
        list[str]: Full URLs that answered with HTTP 200.
    """
    hits = []
    attempted = []

    with ThreadPoolExecutor(max_workers=MAX_THREADS) as pool:
        pending = {
            pool.submit(check_single_url, url, candidate, scanned, None): candidate
            for candidate in paths
        }

        for fut in as_completed(pending):
            attempted.append(pending[fut])
            try:
                hit = fut.result()
            except Exception as exc:
                print(f"[!] Thread generated an exception: {exc}")
            else:
                if hit:
                    hits.append(hit)

    if hits:
        save_result_batch(hits)
    write_state_batch(attempted)

    return hits

def _main():
    """CLI entry point: validate arguments, diff against saved state, scan."""
    if len(sys.argv) != 2:
        print("Usage: python main.py <target_url>")
        sys.exit(1)

    target_url = sys.argv[1].rstrip('/')
    print(f"[*] Starting admin scanner on: {target_url}")

    # Only scan paths we have not attempted in a previous run.
    scanned_paths = read_state()
    new_paths = list(load_all_paths() - scanned_paths)

    if not new_paths:
        print("[*] No new paths to scan.")
        sys.exit(0)

    print(f"[*] Found {len(new_paths)} paths to scan.")
    found = check_admin(target_url, new_paths, scanned_paths)
    print(f"[*] Scan complete. Found {len(found)} admin paths.")


if __name__ == "__main__":
    _main()