import argparse
import logging
import re
import requests
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from urllib.parse import urlparse, urljoin

from tqdm import tqdm

# --- V4 key change 1: define global browser-style request headers ---
# Mimic a browser visit; the original author notes this was the key fix for
# the "fatal error" (presumably servers rejecting the default
# python-requests User-Agent — TODO confirm against the failing sources).
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36'
}

# --- 日志设置 ---
def setup_logging(log_file='m3u_checker.log'):
    """Configure the root logger: full DEBUG detail to *log_file*, INFO to the console."""
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    # Clear handlers left over from any previous call so messages are not duplicated.
    if root.hasHandlers():
        root.handlers.clear()

    to_file = logging.FileHandler(log_file, 'w', 'utf-8')
    to_file.setLevel(logging.DEBUG)
    to_file.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(module)s - %(message)s'))

    to_console = logging.StreamHandler()
    to_console.setLevel(logging.INFO)
    to_console.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))

    for handler in (to_file, to_console):
        root.addHandler(handler)

# --- 频道类定义 ---
class Channel:
    """One playlist entry: the metadata from an #EXTINF line plus its stream URL."""

    def __init__(self, name, url, group="", logo="", raw_info=""):
        self.name = name          # display name (tvg-name attribute or text after the comma)
        self.url = url            # stream URL taken from the line after #EXTINF
        self.group = group        # group-title attribute, "" when absent
        self.logo = logo          # tvg-logo attribute, "" when absent
        self.raw_info = raw_info  # the original #EXTINF line, written back verbatim
        self.is_alive = False     # set by check_url() after probing the stream

    def __repr__(self):
        # Identity and liveness at a glance when debugging.
        return f"{type(self).__name__}(name={self.name!r}, url={self.url!r}, is_alive={self.is_alive})"

# --- 功能函数 ---
def get_m3u_content(source_url_or_path: str) -> 'str | None':
    """Fetch M3U text from an http(s) URL or a local file path.

    Returns the playlist text, or None when the source cannot be read
    (the failure is logged with a traceback). The original annotation
    claimed ``-> str``; it is now honest about the None failure path.
    """
    parsed_url = urlparse(source_url_or_path)
    if all([parsed_url.scheme, parsed_url.netloc]):
        logging.info(f"正在从 URL 下载 M3U 文件: {source_url_or_path}")
        try:
            # Browser-like headers: some servers reject the default requests UA.
            response = requests.get(source_url_or_path, timeout=15, headers=HEADERS)
            response.raise_for_status()
            logging.info("M3U 文件下载成功。")
            return response.text
        except requests.RequestException:
            logging.exception(f"致命错误: 无法从 URL '{source_url_or_path}' 下载文件。")
            return None
    else:
        logging.info(f"正在读取本地 M3U 文件: {source_url_or_path}")
        try:
            with open(source_url_or_path, 'r', encoding='utf-8') as f:
                return f.read()
        except (OSError, UnicodeDecodeError):
            # Narrowed from bare Exception: missing file, permissions, or a
            # non-UTF-8 file — anything else is a real bug and should surface.
            logging.exception(f"致命错误: 无法读取本地文件 '{source_url_or_path}'。")
            return None

def parse_m3u(content: str) -> list[Channel]:
    """Parse M3U text into a list of Channel objects.

    Extracts tvg-name / group-title / tvg-logo from each #EXTINF line and
    pairs it with the URL on the following line.

    Bug fix: an #EXTINF line NOT followed by a URL is now skipped without
    consuming the next line. Previously two consecutive #EXTINF lines caused
    the second entry to be swallowed and lost entirely.
    """
    logging.info("开始解析 M3U 内容...")
    channels = []
    lines = content.strip().split('\n')
    if not lines or not lines[0].strip().startswith('#EXTM3U'):
        logging.warning("文件格式可能不正确，缺少 #EXTM3U 头部。")
    i = 0
    while i < len(lines):
        line = lines[i].strip()
        if line.startswith('#EXTINF:'):
            raw_info = line
            tvg_name = re.search(r'tvg-name="([^"]*)"', line)
            group_title = re.search(r'group-title="([^"]*)"', line)
            tvg_logo = re.search(r'tvg-logo="([^"]*)"', line)
            # Fallback display name: the text after the last comma on the line.
            fallback_name = line.split(',')[-1].strip()
            name = tvg_name.group(1) if tvg_name else fallback_name
            group = group_title.group(1) if group_title else ""
            logo = tvg_logo.group(1) if tvg_logo else ""
            if i + 1 < len(lines) and lines[i + 1].strip().startswith('http'):
                channels.append(Channel(name=name, url=lines[i + 1].strip(),
                                        group=group, logo=logo, raw_info=raw_info))
                i += 2  # consume both the #EXTINF line and its URL
                continue
            logging.warning(f"在 #EXTINF 行后未找到有效的URL: '{line}'")
        i += 1
    logging.info(f"解析完成，共找到 {len(channels)} 个频道。")
    return channels

def get_final_stream_url(master_url: str, timeout: int) -> 'str | None':
    """Resolve a master .m3u/.m3u8 playlist URL to its first media stream URL.

    URLs that are not playlists are returned unchanged without any request.
    Returns None when the master playlist cannot be fetched or contains no
    stream line. (The original ``-> str`` annotation hid the None path.)
    """
    if not master_url.lower().endswith(('.m3u', '.m3u8')):
        return master_url
    logging.debug(f"检测到主播放列表: {master_url}，尝试解析...")
    try:
        # Browser-like headers: some servers reject the default requests UA.
        response = requests.get(master_url, timeout=timeout, headers=HEADERS)
        response.raise_for_status()
        for line in response.text.split('\n'):
            line = line.strip()
            # First non-empty, non-comment line is the media URL; it may be
            # relative, so resolve it against the master playlist's URL.
            if line and not line.startswith('#'):
                final_url = urljoin(master_url, line)
                logging.debug(f"从主播放列表派生出流URL: {final_url}")
                return final_url
        logging.warning(f"主播放列表 {master_url} 为空或不含流地址。")
        return None
    except requests.RequestException as e:
        logging.debug(f"获取主播放列表 {master_url} 失败: {e}")
        return None

def check_url(channel: Channel, timeout: int, stop_event: threading.Event):
    """Probe one channel's stream and set ``channel.is_alive`` in place.

    Resolves a master playlist to its media URL first, then opens the stream
    with a browser-style GET (stream=True); a 2xx response with headers is
    treated as alive. Returns early without probing when stop_event is set.

    Fix: the response is managed with ``with`` so the streamed connection is
    always released — previously ``response.close()`` was skipped whenever
    ``raise_for_status()`` raised, leaking the connection.
    """
    if stop_event.is_set():
        return
    logging.debug(f"开始处理频道: {channel.name} ({channel.url})")
    url_to_check = get_final_stream_url(channel.url, timeout)
    if not url_to_check:
        logging.debug(f"频道 {channel.name} 无法获取最终流地址，标记为无效。")
        channel.is_alive = False
        return
    if stop_event.is_set():
        return
    try:
        # GET with stream=True mimics a real player better than HEAD; only the
        # headers are fetched, the body is never consumed.
        with requests.get(url_to_check, timeout=timeout, headers=HEADERS, stream=True) as response:
            response.raise_for_status()  # reject non-2xx statuses
            # A successful connection plus response headers counts as alive.
            channel.is_alive = True
            logging.debug(f"成功 - {channel.name} (状态码: {response.status_code})")
    except requests.RequestException as e:
        # Covers timeouts, connection errors and HTTP errors alike
        # (same spelling as the rest of the file for consistency).
        logging.debug(f"失败 - {channel.name} - URL: {url_to_check} - 错误: {e}")
        channel.is_alive = False

def test_channels_concurrently(channels: list, timeout: int, max_workers: int, stop_event: threading.Event):
    """Run check_url for every channel on a thread pool, showing progress.

    Results are written onto the Channel objects (``is_alive``); nothing is
    returned. The live counter is now maintained incrementally — the original
    re-scanned the whole channel list after every completed future, making
    progress reporting O(n^2) for large playlists.
    """
    logging.info(f"开始并发测试 {len(channels)} 个频道 (超时: {timeout}s, 并发数: {max_workers})")
    logging.info("在检测过程中，按 Ctrl+C 可以随时停止并保存当前结果。")
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Map each future back to its channel so each completion is counted in O(1).
        future_to_channel = {
            executor.submit(check_url, channel, timeout, stop_event): channel
            for channel in channels
        }
        live_count = 0
        progress = tqdm(as_completed(future_to_channel), total=len(channels), desc="正在检测")
        for future in progress:
            if stop_event.is_set():
                progress.set_description("正在停止...")
                break
            # check_url mutates the Channel directly; future.result() is not needed.
            if future_to_channel[future].is_alive:
                live_count += 1
            progress.set_postfix(available=f"{live_count}")

def write_m3u(channels: list[Channel], output_file: str):
    """Write every channel flagged alive to *output_file*, keeping original order."""
    survivors = [ch for ch in channels if ch.is_alive]
    logging.info(f"准备将 {len(survivors)} 个可用频道（保持原始顺序）写入文件: {output_file}")
    try:
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write('#EXTM3U\n')
            # Each entry is its verbatim #EXTINF line followed by its URL.
            f.writelines(f"{ch.raw_info}\n{ch.url}\n" for ch in survivors)
        logging.info("新 M3U 文件写入成功！")
    except IOError:
        logging.exception(f"致命错误: 无法写入输出文件 '{output_file}'。")

if __name__ == '__main__':
    # CLI entry point: fetch/read the playlist, probe every channel
    # concurrently, then write the channels that survived to the output file.
    parser = argparse.ArgumentParser(description="M3U 播放列表智能检测器 (v4-稳定版)。")
    parser.add_argument('input', help="M3U 文件的 URL 或本地路径。")
    parser.add_argument('output', help="保存可用频道的新 M3U 文件名。")
    parser.add_argument('-t', '--timeout', type=int, default=5, help="每个频道连接的超时时间（秒）。默认为5秒。")
    parser.add_argument('-w', '--workers', type=int, default=30, help="并发测试的线程数。默认为30。")
    parser.add_argument('--log', default='m3u_checker.log', help="日志文件名。默认为 'm3u_checker.log'。")

    args = parser.parse_args()
    setup_logging(args.log)
    logging.info("脚本开始执行 (v4 - 稳定版)...")
    # stop_event lets Ctrl+C abort the worker threads cooperatively.
    stop_event = threading.Event()
    all_channels = []
    
    try:
        m3u_content = get_m3u_content(args.input)
        if m3u_content:
            all_channels = parse_m3u(m3u_content)
            if all_channels:
                test_channels_concurrently(all_channels, args.timeout, args.workers, stop_event)
    except KeyboardInterrupt:
        # Ctrl+C: signal the workers to stop, then fall through to finally
        # so the results collected so far are still saved.
        logging.warning("\n检测被用户中止。正在处理已完成的结果...")
        stop_event.set()
    except Exception:
        logging.critical("脚本发生未处理的致命错误。", exc_info=True)
    finally:
        # Runs even on interrupt/failure: report and persist whatever we have.
        if all_channels:
            live_channels_count = sum(1 for ch in all_channels if ch.is_alive)
            print(f"\n检测完毕！总频道数: {len(all_channels)}, 可用频道数: {live_channels_count}")
            if live_channels_count > 0:
                # write_m3u filters to alive channels again, preserving order.
                write_m3u(all_channels, args.output)
            else:
                logging.info("没有找到任何可用的频道。")
        else:
            logging.error("未能处理任何频道。")
            
    logging.info("脚本执行完毕。")