import json
import asyncio
import sys
import re
import requests
import multiprocessing
import time
import validators
from multiprocessing import Pool
from multi_task import TaskManager
from urllib.parse import urljoin, urlparse, urlunparse
from bs4 import BeautifulSoup
from typing import Set, List
from parser import parse_video_share_url, parse_video_id, VideoSource
from data_url import src_url,dst_url

# NOTE(review): 1000 is CPython's default recursion limit, so this call is
# effectively a no-op — confirm whether a different value was intended.
sys.setrecursionlimit(1000)
# Parse based on the share link (dispatch happens in __main__ below)
def is_valid_url(url: str) -> bool:
    """Check that a URL qualifies as an archive page link.

    A URL qualifies when it contains 'archives' and does NOT contain
    'html'; both checks are case-insensitive.
    """
    lowered = url.lower()
    if 'html' in lowered:        # reject rendered .html pages outright
        return False
    return 'archives' in lowered # must mention archives somewhere
def is_archive_url(url: str) -> bool:
    """Report whether a URL mentions 'archives' (case-insensitive).

    Only the path, query string, and fragment are inspected, e.g.:
    - path:     /archives/2024/
    - query:    ?category=archives
    - fragment: #archives-section
    """
    parts = urlparse(url)
    # Case-insensitive substring check over each inspected component.
    for component in (parts.path, parts.query, parts.fragment):
        if 'archives' in component.lower():
            return True
    return False

def extract_urls(base_url: str, max_depth: int = 2) -> List[str]:
    """Crawl numbered pages under *base_url* and collect archive links.

    Pages ``base_url//1`` .. ``base_url//(max_depth-1)`` are fetched
    (duplicate slashes are collapsed while the scheme's ``://`` is kept);
    every anchor href is resolved to an absolute URL and kept when its
    path, query, or fragment mentions 'archives'.

    NOTE(review): ``range(1, max_depth)`` never reaches page *max_depth*
    itself — with the default of 2 only page 1 is crawled. Confirm whether
    ``range(1, max_depth + 1)`` was intended before changing it.

    Args:
        base_url: site root (scheme + host, optionally a section path).
        max_depth: exclusive upper bound for the page counter.

    Returns:
        Sorted list of unique matching absolute URLs. Pages that fail to
        download are reported on stderr and skipped (best-effort crawl).
    """
    archive_urls: Set[str] = set()

    headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36",
        "Accept-Encoding": "gzip, deflate",
    }

    for num in range(1, max_depth):
        # Collapse accidental duplicate slashes, but preserve '://'.
        current_url = re.sub(r"(?<!:)/{2,}", "/", base_url + '//' + str(num))
        print(current_url)
        try:
            print(f"\033[34m[*] 抓取: {current_url}\033[0m")
            response = requests.get(current_url, headers=headers, timeout=15)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'lxml')

            # Resolve every anchor against the current page and keep the
            # archive-looking ones; set.add de-duplicates on its own.
            for tag in soup.find_all('a', href=True):
                full_url = urljoin(current_url, tag['href'].strip())
                if is_archive_url(full_url):
                    archive_urls.add(full_url)

        except Exception as e:
            # Best-effort: report the failure and continue with the next page.
            print(f"\033[31m[-] 错误: {current_url} → {str(e)}\033[0m", file=sys.stderr)

    return sorted(archive_urls)
def is_web_url(url: str) -> bool:
    """Return True only for syntactically valid http(s) URLs.

    ``validators.url`` returns a falsy ``ValidationFailure`` object — not
    ``False`` — on invalid input, so the result is coerced with ``bool()``
    to honour the annotated return type for all callers.
    """
    return bool(validators.url(url)) and url.startswith(('http://', 'https://'))
if __name__ == "__main__":
    # Usage: script.py <site homepage URL>
    if len(sys.argv) < 2:
        print("错误：缺少参数，目标主页")
        sys.exit(1)
    args = sys.argv
    # Reject anything that is not a valid http(s) address.
    if not is_web_url(args[1]):
        print("错误：输入非网页地址")
        sys.exit(1)
    print("\n\033[1m=== 归档链接提取器 (Linux 专用) ===\033[0m")
    try:
        # A URL mentioning 'com'/'cn' is treated as a direct share link and
        # parsed immediately; otherwise the site is crawled section by section.
        if "com" in args[1] or "cn" in args[1]:
            asyncio.run(parse_video_share_url(args[1], 0))
        else:
            for item_src_url in src_url:
                # item_src_url: (section suffix, page count) — presumably
                # defined in data_url.src_url; TODO confirm the tuple layout.
                results = extract_urls(args[1] + item_src_url[0], item_src_url[1])
                dst_url.extend(results)
            dst_url = list(set(dst_url))  # de-duplicate collected links
            # Hand the URLs to the worker pool and poll until all are done.
            manager = TaskManager(8)
            manager.start()
            try:
                while manager.task_counter < len(dst_url):
                    time.sleep(5)
                    print(f"当前已完成任务数: {manager.task_counter}/{len(dst_url)}")
            except KeyboardInterrupt:
                print("停止接收新任务...")
            finally:
                # Always release the worker pool — previously shutdown() was
                # only reached via Ctrl-C, leaking workers on normal完成.
                manager.shutdown()
    except Exception as e:
        # Include the actual error so failures are diagnosable (the old
        # message dropped the exception entirely).
        print(f"download error : {args[1]} ({e})", file=sys.stderr)