#!/usr/bin/env python3
"""
V2ray订阅链接自动获取脚本
每天自动获取V2ray订阅链接内容并存储到指定目录
"""

import os
import sys
import requests
import logging
from datetime import datetime
from pathlib import Path
import time
from typing import List, Dict, Optional
import base64

# Load configuration; fall back to built-in defaults when config.py is absent.
try:
    from config import *
except ImportError:
    # config.py not found: use the default settings below.
    OUTPUT_DIR = "/var/proxy"
    LOG_FILE = "/var/log/proxy_fetcher.log"
    BASE_URL = "https://shareclash.github.io/uploads"
    # Bug fix: GITHUB_URL and PRIORITY_GITHUB_PARSE are referenced
    # unconditionally later in this file; without config.py they raised
    # NameError. The real GitHub page URL is only known via config.py, so
    # GitHub parsing is disabled by default and the generated-URL fallback
    # is used instead.
    GITHUB_URL = ""
    PRIORITY_GITHUB_PARSE = False
    MAX_RETRIES = 3
    REQUEST_TIMEOUT = 30
    SUBSCRIPTION_COUNT = 5
    USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    LOG_LEVEL = "INFO"
    LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
    VALIDATE_CONTENT = True
    MIN_CONTENT_LENGTH = 10
# Configure logging: create the log file's directory if it does not exist.
# If the primary location cannot be created or written to (e.g. no
# permission for /var/log), fall back to a log file in the user's home.
log_file_expanded = os.path.expanduser(LOG_FILE)
log_dir = os.path.dirname(log_file_expanded)
fallback_log = os.path.expanduser("~/proxy/proxy_fetcher.log")
try:
    if log_dir and not os.path.isdir(log_dir):
        os.makedirs(log_dir, exist_ok=True)
    # Bug fix: probe writability explicitly. The directory may already exist
    # but be read-only for this user, in which case the original code never
    # fell back and logging.FileHandler below crashed the whole script.
    with open(log_file_expanded, 'a', encoding='utf-8'):
        pass
    selected_log = log_file_expanded
except Exception:
    # Fall back to the user's home directory.
    os.makedirs(os.path.dirname(fallback_log), exist_ok=True)
    selected_log = fallback_log

logging.basicConfig(
    # Normalize the level name and fall back to INFO for invalid values.
    # (Bug fix: getattr(logging, "info") would return the logging.info
    # *function*, and an unknown name raised AttributeError.)
    level=getattr(logging, str(LOG_LEVEL).upper(), logging.INFO),
    format=LOG_FORMAT,
    handlers=[
        logging.FileHandler(selected_log),
        logging.StreamHandler(sys.stdout)
    ]
)
logger = logging.getLogger(__name__)

class ProxyFetcher:
    """Fetches V2ray subscription link contents and stores them in a directory."""

    def __init__(self, base_url: str = BASE_URL,
                 output_dir: str = OUTPUT_DIR):
        """
        Initialize the fetcher.

        Args:
            base_url: Base URL used when generating fallback subscription links.
            output_dir: Directory where fetched files are written.
        """
        # An optional "--env dev|prod" (or "-e") CLI argument selects an
        # environment-specific output directory from the config, if defined.
        env = None
        for idx, arg in enumerate(sys.argv):
            if arg in ("--env", "-e") and idx + 1 < len(sys.argv):
                env = sys.argv[idx + 1].strip().lower()
                break

        resolved_output_dir = output_dir
        try:
            if env == "dev" and 'OUTPUT_DIR_DEV' in globals():
                resolved_output_dir = OUTPUT_DIR_DEV
            elif env == "prod" and 'OUTPUT_DIR_PROD' in globals():
                resolved_output_dir = OUTPUT_DIR_PROD
        except Exception:
            # Environment-specific directories are optional; keep the default.
            pass

        self.base_url = base_url
        self.output_dir = Path(os.path.expanduser(resolved_output_dir))
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': USER_AGENT
        })

        # Make sure the output directory exists before any file is written.
        self.output_dir.mkdir(parents=True, exist_ok=True)

    def get_current_date_str(self) -> str:
        """Return the current local date formatted as YYYYMMDD."""
        return datetime.now().strftime("%Y%m%d")

    def get_subscription_urls_from_github(self) -> List[str]:
        """
        Parse V2ray subscription links from the GitHub README page.

        Returns:
            List of subscription URLs; on any failure the generated-URL
            fallback list is returned instead.
        """
        github_url = GITHUB_URL

        try:
            logger.info(f"正在访问GitHub页面: {github_url}")
            response = self.session.get(github_url, timeout=REQUEST_TIMEOUT)
            response.raise_for_status()

            content = response.text

            import re

            # Matches links shaped like .../uploads/2024/01/0-20240101.txt
            v2ray_pattern = r'https://shareclash\.github\.io/uploads/\d{4}/\d{2}/\d-\d{8}\.txt'
            urls = re.findall(v2ray_pattern, content)

            if urls:
                # De-duplicate (dict.fromkeys preserves first-seen order),
                # then sort so the resulting order is deterministic.
                unique_urls = list(dict.fromkeys(urls))
                unique_urls.sort()

                logger.info(f"从GitHub页面解析到 {len(unique_urls)} 个唯一的V2ray订阅链接")

                # Cap the number of links to the configured count.
                if len(unique_urls) > SUBSCRIPTION_COUNT:
                    logger.info(f"限制为前 {SUBSCRIPTION_COUNT} 个链接")
                    unique_urls = unique_urls[:SUBSCRIPTION_COUNT]

                return unique_urls
            else:
                logger.warning("未在GitHub页面找到V2ray订阅链接，使用默认链接生成方式")
                return self.get_subscription_urls_fallback()

        except Exception as e:
            logger.error(f"访问GitHub页面失败: {e}")
            logger.info("使用默认链接生成方式")
            return self.get_subscription_urls_fallback()

    def get_subscription_urls_fallback(self, date_str: str = None) -> List[str]:
        """
        Generate subscription URLs from the base URL and a date (backup plan).

        Args:
            date_str: Date string in YYYYMMDD format; defaults to today.
                (Generalized: previously this method always used today's date.)

        Returns:
            List of generated subscription URLs.
        """
        if date_str is None:
            date_str = self.get_current_date_str()
        # URL layout: <base>/<YYYY>/<MM>/<index>-<YYYYMMDD>.txt
        return [
            f"{self.base_url}/{date_str[:4]}/{date_str[4:6]}/{i}-{date_str}.txt"
            for i in range(SUBSCRIPTION_COUNT)
        ]

    def get_subscription_urls(self, date_str: str = None) -> List[str]:
        """
        Get the subscription URL list (GitHub parsing first, when enabled).

        Args:
            date_str: Date string in YYYYMMDD format (optional).

        Returns:
            List of subscription URLs.
        """
        # Depending on configuration, try to parse URLs from GitHub first.
        if PRIORITY_GITHUB_PARSE:
            urls = self.get_subscription_urls_from_github()
            if urls:
                return urls

        # GitHub parsing disabled or failed: generate URLs for the requested
        # date. (Bug fix: date_str was previously computed but never passed
        # to the generator, so the parameter was silently ignored.)
        return self.get_subscription_urls_fallback(date_str)

    def fetch_content(self, url: str, retries: int = MAX_RETRIES) -> Optional[str]:
        """
        Fetch the text content of a URL with retries.

        Args:
            url: URL to fetch.
            retries: Number of attempts.

        Returns:
            The fetched content, or None if every attempt failed.
        """
        for attempt in range(retries):
            try:
                logger.info(f"正在获取: {url} (尝试 {attempt + 1}/{retries})")
                response = self.session.get(url, timeout=REQUEST_TIMEOUT)
                response.raise_for_status()

                content = response.text
                if content.strip():
                    # Reject suspiciously short payloads when validation is on.
                    if VALIDATE_CONTENT and len(content.strip()) < MIN_CONTENT_LENGTH:
                        logger.warning(f"获取到的内容过短: {url} (长度: {len(content.strip())})")
                        if attempt < retries - 1:
                            continue
                        else:
                            return None

                    logger.info(f"成功获取内容，长度: {len(content)} 字符")
                    return content
                else:
                    logger.warning(f"获取到的内容为空: {url}")

            except requests.exceptions.RequestException as e:
                logger.error(f"获取失败 (尝试 {attempt + 1}/{retries}): {url} - {e}")
                if attempt < retries - 1:
                    time.sleep(2 ** attempt)  # exponential backoff

        return None

    def save_content(self, content: str, filename: str) -> bool:
        """
        Write content to a file inside the output directory.

        Args:
            content: Text to save.
            filename: File name relative to the output directory.

        Returns:
            True on success, False otherwise.
        """
        # Built outside the try so the except clause can log the real path.
        file_path = self.output_dir / filename
        try:
            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(content)
            logger.info(f"内容已保存到: {file_path}")
            return True
        except Exception as e:
            # Bug fix: this log line previously printed the literal
            # placeholder "(unknown)" instead of the failing path.
            logger.error(f"保存文件失败: {file_path} - {e}")
            return False

    def fetch_all_subscriptions(self) -> Dict[str, bool]:
        """
        Fetch every subscription URL and store each as "<index>.txt".

        Returns:
            Mapping of output file name to success flag.
        """
        urls = self.get_subscription_urls()

        results = {}

        for i, url in enumerate(urls, 1):
            filename = f"{i}.txt"
            # Bug fix: these log lines previously printed the literal
            # placeholder "(unknown)" instead of the interpolated values.
            logger.info(f"处理第 {i} 个订阅链接: {filename}")
            logger.info(f"URL: {url}")

            content = self.fetch_content(url)
            if content:
                results[filename] = self.save_content(content, filename)
            else:
                logger.error(f"无法获取内容: {url}")
                results[filename] = False

        return results

    @staticmethod
    def _maybe_decode_base64(content: str) -> str:
        """Return the base64-decoded text when *content* looks like an
        encoded node list (a known proxy scheme appears after decoding);
        otherwise return the original text unchanged."""
        try:
            # Strip all whitespace first: feeds may wrap base64 across lines.
            compact = "".join(content.strip().split())
            # base64 input length must be a multiple of 4; pad if needed.
            pad_len = (4 - len(compact) % 4) % 4
            decoded = base64.b64decode(compact + ("=" * pad_len), validate=False)
            try:
                decoded_text = decoded.decode("utf-8", errors="strict")
                # Heuristic: treat the decode as valid only if a common proxy
                # protocol scheme shows up in the result.
                if any(proto in decoded_text
                       for proto in ("vmess://", "vless://", "trojan://", "ss://")):
                    return decoded_text
            except UnicodeDecodeError:
                pass
        except Exception:
            # Not base64 (or undecodable) — keep the raw text.
            pass
        return content

    def _fetch_nodes_feed(self, config_key: str, label: str, filename: str) -> bool:
        """Fetch one optional nodes feed whose URL is looked up in the module
        globals by *config_key*, auto-decode a base64 payload, and save the
        result as *filename*. Returns True on success.

        (Shared implementation extracted from the two previously duplicated
        free-nodes methods.)
        """
        url = globals().get(config_key)
        if not url:
            logger.warning(f"未配置 {config_key}，跳过 {filename}")
            return False

        content = self.fetch_content(url)
        if not content:
            logger.error(f"{label} 内容获取失败")
            return False

        return self.save_content(self._maybe_decode_base64(content), filename)

    def fetch_free_nodes_and_save(self) -> bool:
        """Fetch the raw free-nodes subscription and save it as 6.txt.

        The feed is either base64 (possibly with line breaks) or plain text;
        the format is auto-detected before saving.
        """
        return self._fetch_nodes_feed("FREE_NODES_URL", "free-nodes", "6.txt")

    def fetch_free_nodes_v1_and_save(self) -> bool:
        """Fetch the free-nodes v1 subscription and save it as 7.txt.

        Same base64/plain-text auto-detection as fetch_free_nodes_and_save.
        """
        return self._fetch_nodes_feed("FREE_NODES_V1_URL", "free-nodes v1", "7.txt")

    def run(self) -> bool:
        """
        Run the fetcher over all subscription URLs.

        Returns:
            True only if every file was fetched and saved successfully.
        """
        logger.info("开始获取V2ray订阅链接内容")

        try:
            results = self.fetch_all_subscriptions()

            success_count = sum(1 for success in results.values() if success)
            total_count = len(results)

            logger.info(f"获取完成: {success_count}/{total_count} 个文件成功")

            # Log a per-file result line.
            for filename, success in results.items():
                status = "成功" if success else "失败"
                # Bug fix: previously printed the literal "(unknown)" instead
                # of the file name.
                logger.info(f"  {filename}: {status}")

            return success_count == total_count

        except Exception as e:
            logger.error(f"运行过程中发生错误: {e}")
            return False

def main():
    """Entry point: fetch all subscriptions, then the two free-nodes feeds.

    Exits with status 0 only when every fetch succeeded, 1 otherwise
    (including user interruption or an unexpected error).
    """
    try:
        fetcher = ProxyFetcher()
        # All three steps always run, even if an earlier one failed, so
        # every output file gets its chance to be refreshed.
        base_ok = fetcher.run()
        nodes_ok = fetcher.fetch_free_nodes_and_save()      # 6.txt
        nodes_v1_ok = fetcher.fetch_free_nodes_v1_and_save()  # 7.txt

        if base_ok and nodes_ok and nodes_v1_ok:
            logger.info("所有订阅链接获取成功（包含 6.txt / 7.txt）")
            sys.exit(0)
        logger.error("部分订阅链接获取失败（或 6.txt / 7.txt 失败）")
        sys.exit(1)
    except KeyboardInterrupt:
        logger.info("用户中断操作")
        sys.exit(1)
    except Exception as e:
        logger.error(f"程序执行失败: {e}")
        sys.exit(1)

if __name__ == "__main__":
    main()
