import argparse
import calendar
import datetime
import os
import platform
import re
import shutil
import subprocess
import sys
import time
from urllib.parse import unquote

import requests

def get_current_date():
    """Return the current system date as a (year, month, day) tuple."""
    today = datetime.datetime.now()
    return (today.year, today.month, today.day)

def generate_file_names(year, month, day=None):
    """Generate candidate Markdown file names for a whole month or one day.

    Names follow the repository layout 'YYYY-MM/DD日HH时MM分.md', with one
    candidate every 30 minutes.

    Args:
        year: Four-digit year.
        month: Month number (1-12).
        day: Optional day of month. When None, every valid day of the month
            is generated (month length and leap years are handled via
            calendar.monthrange, instead of blindly assuming 31 days).

    Returns:
        List of relative file names in chronological order.
    """
    year_month = f"{year}-{month:02d}"

    if day is None:
        # Only the days that actually exist in this month/year.
        days = range(1, calendar.monthrange(year, month)[1] + 1)
    else:
        days = [day]

    return [
        f"{year_month}/{d:02d}日{hour:02d}时{minute:02d}分.md"
        for d in days
        for hour in range(24)
        for minute in (0, 30)
    ]

def find_latest_file(file_names):
    """Probe candidate URLs from newest to oldest; return the first that exists.

    Args:
        file_names: Candidate relative file names in chronological
            (oldest-first) order. The list is NOT mutated.

    Returns:
        (url, file_name) of the first candidate answering HTTP 200, or
        (None, None) when no candidate exists or the list is empty.
    """
    base_url = "https://raw.githubusercontent.com/sharkDoor/vpn-free-nodes/master/node-list/"
    # Iterate a reversed copy (newest first) instead of reversing in place,
    # so the caller's list is left untouched.
    candidates = file_names[::-1]

    print(f"开始查找最新文件，共{len(candidates)}个候选...")

    for idx, file_name in enumerate(candidates):
        url = base_url + file_name
        attempt_num = idx + 1

        try:
            print(f"尝试请求 [{attempt_num}/{len(candidates)}]: {file_name}")
            # Timeout prevents the scan from hanging on a stalled connection.
            response = requests.get(url, timeout=10)

            if response.status_code == 200:
                print(f"✅ 找到有效文件: {file_name}")
                return url, file_name
            print(f"❌ 文件不存在 (状态码: {response.status_code}): {file_name}")
        except requests.RequestException as e:
            print(f"⚠️ 请求出错: {str(e)} - {file_name}")

        # Throttle between attempts; applies to the error path too
        # (the original `continue` skipped the sleep after a request error).
        time.sleep(0.5)

    print(f"❌ 未找到有效文件")
    return None, None

def download_file(url, file_name):
    """Download a remote text file and save it to the current directory.

    Args:
        url: Full URL of the file to fetch.
        file_name: Remote relative path; '/' is replaced with '_' to build
            a flat local file name.

    Returns:
        The local file name on success, otherwise None.
    """
    local_filename = file_name.replace("/", "_")
    try:
        print(f"开始下载文件: {local_filename}")
        # Timeout avoids hanging forever on a dead connection.
        response = requests.get(url, timeout=30)

        if response.status_code == 200:
            with open(local_filename, 'w', encoding='utf-8') as f:
                f.write(response.text)
            print(f"✅ 下载完成: {local_filename}")
            return local_filename

        print(f"❌ 下载失败，状态码: {response.status_code}")
        return None
    except requests.RequestException as e:
        print(f"⚠️ 下载过程发生错误: {str(e)}")
        return None

def extract_links_from_md(file_path):
    """Extract Trojan links from a Markdown file (supports table rows).

    Args:
        file_path: Path to the downloaded Markdown file.

    Returns:
        List of 'trojan://...' links (first link per matching line), or []
        when none are found or the file cannot be read.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()

        # First pass: keep only lines mentioning trojan:// (any case).
        trojan_pattern = re.compile(
            r'^.*?trojan://.*?$',
            re.MULTILINE | re.IGNORECASE
        )
        trojan_lines = trojan_pattern.findall(content)

        if not trojan_lines:
            print("未找到Trojan链接")
            return []

        # Second pass: pull the link out of each line, stopping at whitespace
        # or a Markdown table separator '|'.  IGNORECASE keeps this pass
        # consistent with the line filter above — previously an upper-case
        # 'TROJAN://' line passed the filter but was silently dropped here.
        link_pattern = re.compile(r'trojan://[^|\s]+', re.IGNORECASE)
        links = []
        for line in trojan_lines:
            link_match = link_pattern.search(line)
            if link_match:
                links.append(link_match.group(0))

        return links
    except (OSError, UnicodeDecodeError) as e:
        # Narrowed from a blanket `except Exception` to actual read failures.
        print(f"提取链接时发生错误: {str(e)}")
        return []

def print_links(links, format_option):
    """Print the links as a numbered list and return them as plain text.

    Args:
        links: List of 'trojan://...' links.
        format_option: 'raw' (as-is), 'url' (scheme stripped) or
            'decoded' (scheme stripped and percent-decoded).

    Returns:
        Newline-joined links without numbering (for the clipboard), or ""
        when there is nothing to print.
    """
    if not links:
        print("未找到有效链接")
        return ""

    # Dispatch table instead of an if/elif chain over the format name.
    transforms = {
        'raw': lambda link: link,
        'url': lambda link: link.replace('trojan://', '', 1),
        'decoded': lambda link: unquote(link.replace('trojan://', '', 1)),
    }
    transform = transforms.get(format_option)
    formatted = [transform(link) for link in links] if transform else []

    # Numbered output for the terminal.
    for position, text in enumerate(formatted, start=1):
        print(f"{position}. {text}")

    # Unnumbered text for the clipboard.
    return '\n'.join(formatted)

def copy_to_clipboard(text):
    """Copy text to the system clipboard, best-effort per platform.

    Returns silently on empty text; prints a warning instead of raising
    when no clipboard tool is available or the copy fails.
    """
    if not text:
        return

    try:
        system = platform.system()
        if system == "Windows":
            subprocess.run(['clip'], text=True, input=text, check=True)
            print("✅ 链接已复制到剪贴板")
        elif system == "Darwin":  # macOS
            subprocess.run(['pbcopy'], text=True, input=text, check=True)
            print("✅ 链接已复制到剪贴板")
        elif system == "Linux":
            # Prefer xclip, fall back to xsel.  shutil.which replaces the
            # noisy os.system('which ...') probe, and argument lists with
            # shell=False fix the original xsel call, which passed a whole
            # command line as the executable name and could never run.
            if shutil.which('xclip'):
                subprocess.run(['xclip', '-selection', 'clipboard'],
                               text=True, input=text, check=True)
                print("✅ 链接已复制到剪贴板")
            elif shutil.which('xsel'):
                subprocess.run(['xsel', '--clipboard', '--input'],
                               text=True, input=text, check=True)
                print("✅ 链接已复制到剪贴板")
            else:
                print("⚠️ 无法找到剪贴板工具(xclip/xsel)，请手动复制")
        else:
            print("⚠️ 不支持的操作系统，无法复制到剪贴板")
    except Exception as e:
        # Clipboard is a convenience feature; never let it crash the script.
        print(f"⚠️ 复制到剪贴板时发生错误: {str(e)}")

def main():
    """Entry point: locate the newest node file, download it, extract links."""
    parser = argparse.ArgumentParser(description='自动下载最新节点文件并提取链接')
    parser.add_argument('--date', type=str, help='指定日期范围，格式为YYYY-MM或YYYY-MM-DD')
    parser.add_argument('-f', '--format', choices=['raw', 'url', 'decoded'], default='raw',
                        help='输出格式 (raw: 原始链接, url: 仅URL部分, decoded: 解码后的链接)')
    parser.add_argument('--no-clipboard', action='store_true', help='不复制到剪贴板')
    args = parser.parse_args()

    # 1. Resolve the target date (from --date or the system clock).
    if args.date:
        parts = args.date.split('-')
        try:
            if len(parts) == 2:
                year, month, day = int(parts[0]), int(parts[1]), None
            elif len(parts) == 3:
                year, month, day = int(parts[0]), int(parts[1]), int(parts[2])
            else:
                print("日期格式错误，请使用YYYY-MM或YYYY-MM-DD格式")
                return
        except ValueError:
            # Non-numeric components (e.g. '2024-ab') previously crashed
            # with an uncaught ValueError from int().
            print("日期格式错误，请使用YYYY-MM或YYYY-MM-DD格式")
            return
    else:
        year, month, day = get_current_date()

    # 2. Find and download the most recent file.
    file_names = generate_file_names(year, month, day)
    url, file_name = find_latest_file(file_names)

    if not url or not file_name:
        print("下载失败，无法继续")
        return

    downloaded_file = download_file(url, file_name)
    if not downloaded_file:
        print("下载文件失败，无法继续")
        return

    # 3. Extract links from the downloaded Markdown file.
    print(f"开始从 {downloaded_file} 中提取链接...")
    links = extract_links_from_md(downloaded_file)

    if not links:
        print("未提取到有效链接")
        return

    print(f"✅ 成功提取 {len(links)} 个链接")

    # 4. Print the links and optionally copy them to the clipboard.
    link_text = print_links(links, args.format)
    if not args.no_clipboard:
        copy_to_clipboard(link_text)

# Script entry point: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()