#!/usr/bin/env python3
"""
剧集自动追更下载脚本 - 专门适配电影天堂网站
使用前请安装依赖：pip install requests beautifulsoup4
"""

import os
import json
import logging
import time
import re
from urllib.parse import urlparse, urljoin
import requests
from bs4 import BeautifulSoup

# === Configuration paths ===
# Both files live next to this script, so the tool works from any CWD.
CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.json')
LOG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'download_log.log')

# === Logging setup ===
# Every message is mirrored to a UTF-8 log file and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(LOG_FILE, encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger('AidyttDownloader')


def load_config():
    """Load and parse the JSON configuration file.

    Returns the parsed config dict on success, or None on any failure
    (missing file, invalid JSON, missing 'rss_feeds' key).
    """
    try:
        with open(CONFIG_FILE, 'r', encoding='utf-8') as fh:
            cfg = json.load(fh)
        # Accessing 'rss_feeds' here also validates that the key exists;
        # a KeyError falls through to the error handler below.
        logger.info(f"配置文件加载成功。共有 {len(cfg['rss_feeds'])} 个剧集待监控。")
    except Exception as exc:
        logger.error(f"加载配置文件失败: {exc}")
        return None
    return cfg


def ensure_download_dir(show_name, base_dir):
    """Create (if needed) and return the per-show download directory.

    The show name is stripped of characters that are illegal in Windows
    file names before being joined onto base_dir. Returns the directory
    path, or None if it could not be created.
    """
    illegal = r'<>:"/\\|?*'
    safe_name = ''.join(ch for ch in show_name if ch not in illegal)
    target = os.path.join(base_dir, safe_name)
    try:
        os.makedirs(target, exist_ok=True)
    except Exception as exc:
        logger.error(f"创建目录 {target} 失败: {exc}")
        return None
    logger.debug(f"目录已就绪: {target}")
    return target


# Characters that are illegal in Windows file names; compiled once so the
# per-file loop below does not rebuild the pattern on every call/iteration.
_ILLEGAL_FS_CHARS = re.compile(r'[<>:"/\\|?*]')


def is_file_downloaded(show_dir, episode_title):
    """Return True if a file matching the episode already exists in show_dir.

    Both the directory entries and the episode title are stripped of
    filesystem-illegal characters before the substring comparison, so an
    episode titled "EP01: Pilot" matches a file saved as "EP01 Pilot.mp4".

    A missing show_dir simply means nothing was downloaded yet -> False.
    """
    # Hoisted out of the loop: the original recomputed this per file.
    clean_episode = _ILLEGAL_FS_CHARS.sub('', episode_title)
    try:
        for filename in os.listdir(show_dir):
            clean_filename = _ILLEGAL_FS_CHARS.sub('', filename)
            if clean_episode in clean_filename:
                # BUGFIX: this log line previously printed the literal text
                # "(unknown)" instead of the matching file's name.
                logger.info(f"文件已存在: {filename}")
                return True
        return False
    except FileNotFoundError:
        return False


def thunder_to_http(thunder_url):
    """Decode a thunder:// link into its underlying URL.

    Thunder links are 'AA' + original-url + 'ZZ', base64-encoded, behind a
    'thunder://' scheme prefix. Anything that is not a thunder link — or
    that fails to decode — is returned unchanged.
    """
    if not thunder_url.startswith('thunder://'):
        return thunder_url

    import base64
    try:
        payload = thunder_url[len('thunder://'):]
        # Restore the base64 padding that the link format drops.
        remainder = len(payload) % 4
        if remainder:
            payload += '=' * (4 - remainder)

        decoded = base64.b64decode(payload).decode('utf-8', errors='ignore')
        # The real URL is wrapped in an 'AA' ... 'ZZ' envelope.
        has_envelope = decoded.startswith('AA') and decoded.endswith('ZZ')
        http_url = decoded[2:-2] if has_envelope else decoded

        logger.info(f"转换后的HTTP链接: {http_url}")
        return http_url

    except Exception as e:
        logger.error(f"转换迅雷链接失败: {e}")
        return thunder_url


def download_with_thunder(thunder_url, download_path):
    """Write an Internet-Shortcut file pointing at the thunder:// URL.

    Opening the resulting file hands the link to the system's registered
    handler (Thunder). Returns True on success, False on any write error.
    """
    shortcut_text = f'[InternetShortcut]\nURL={thunder_url}\n'
    try:
        with open(download_path, 'w', encoding='utf-8') as fh:
            fh.write(shortcut_text)
    except Exception as exc:
        logger.error(f"创建迅雷下载任务失败: {exc}")
        return False
    logger.info(f"迅雷下载任务已创建: {download_path}")
    return True


def download_direct_url(direct_url, download_path):
    """Download direct_url to download_path over HTTP, streaming in chunks.

    Sends browser-like headers (the site rejects bare clients) and logs a
    progress line roughly every 5%. Returns True on success, False on any
    network or filesystem error.
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Referer': 'https://www.aidytt.com/',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Cache-Control': 'no-cache',
            'Pragma': 'no-cache'
        }

        logger.info(f"开始下载: {direct_url}")
        # BUGFIX: use the response as a context manager so the streaming
        # connection is always released — the original leaked it if writing
        # to disk raised mid-download.
        with requests.get(direct_url, stream=True, headers=headers, timeout=60) as response:
            response.raise_for_status()

            file_size = int(response.headers.get('content-length', 0))
            downloaded = 0
            last_bucket = -1  # last 5%-bucket logged, to suppress duplicates

            with open(download_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    if not chunk:
                        continue
                    f.write(chunk)
                    downloaded += len(chunk)
                    if file_size > 0:
                        progress = downloaded / file_size * 100
                        bucket = int(progress) // 5
                        if bucket != last_bucket:
                            last_bucket = bucket
                            logger.info(f"下载进度: {progress:.1f}%")

        logger.info(f"直接下载成功: {download_path}")
        return True
    except Exception as e:
        logger.error(f"直接下载失败 {direct_url}: {e}")
        return False


def parse_aidytt_page(url):
    """Parse a dytt episode page and extract all download links.

    Three strategies are combined: (1) <input> elements carrying thunder://
    values, (2) <a> tags with thunder:// hrefs, (3) <a> tags with plain
    http(s) hrefs (treated as direct downloads). Results are de-duplicated
    by title, first occurrence wins.

    Returns a list of dicts with keys 'title', 'thunder_url', 'http_url'
    and 'is_direct_download'; an empty list on any error.

    NOTE(review): strategy 3 harvests *every* http(s) anchor on the page,
    so navigation/advert links may end up in the result — title-based
    de-duplication is the only filter. Confirm against real pages.
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Referer': 'https://www.aidytt.com/',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8'
        }

        logger.info(f"正在解析页面: {url}")
        response = requests.get(url, headers=headers, timeout=30)
        response.raise_for_status()
        response.encoding = 'utf-8'

        soup = BeautifulSoup(response.text, 'html.parser')

        episodes = []

        # Strategy 1: <input> elements whose value holds a thunder:// link
        thunder_inputs = soup.find_all('input', {'value': re.compile(r'thunder://')})
        logger.info(f"找到 {len(thunder_inputs)} 个迅雷链接input元素")

        for i, input_elem in enumerate(thunder_inputs):
            input_value = input_elem.get('value', '')
            logger.info(f"第{i + 1}个input值: {input_value}")

            # Split into episode title and link (full-width colon separator)
            if '：' in input_value:
                episode_title, thunder_url = input_value.split('：', 1)
            else:
                # No separator: try to recover the title from nearby elements
                episode_title = "未知剧集"
                # Prefer the text of a following sibling <a> tag
                sibling_a = input_elem.find_next_sibling('a')
                if sibling_a and sibling_a.get_text(strip=True):
                    episode_title = sibling_a.get_text(strip=True)
                elif input_elem.find_previous('a'):
                    prev_a = input_elem.find_previous('a')
                    episode_title = prev_a.get_text(strip=True)

                thunder_url = input_value

            # Normalize whitespace
            episode_title = episode_title.strip()
            thunder_url = thunder_url.strip()

            # Decode the thunder:// wrapper into a plain URL when possible
            http_url = thunder_to_http(thunder_url)

            episodes.append({
                'title': episode_title,
                'thunder_url': thunder_url,
                'http_url': http_url,
                'is_direct_download': http_url.startswith('http') and not http_url.startswith('thunder://')
            })

        # Strategy 2: <a> tags whose href is a thunder:// link
        download_links = soup.find_all('a', href=re.compile(r'thunder://'))
        logger.info(f"找到 {len(download_links)} 个迅雷下载链接")

        for i, link in enumerate(download_links):
            thunder_url = link.get('href', '')
            episode_title = link.get_text(strip=True) or f"剧集{i + 1}"

            http_url = thunder_to_http(thunder_url)

            episodes.append({
                'title': episode_title,
                'thunder_url': thunder_url,
                'http_url': http_url,
                'is_direct_download': http_url.startswith('http') and not http_url.startswith('thunder://')
            })

        # Strategy 3: any other plain http(s) anchor (treated as a direct download)
        direct_links = soup.find_all('a', href=re.compile(r'https?://'))
        logger.info(f"找到 {len(direct_links)} 个直接HTTP链接")

        for i, link in enumerate(direct_links):
            http_url = link.get('href', '')
            episode_title = link.get_text(strip=True) or f"剧集{i + 1}"

            if http_url and not http_url.startswith('thunder://'):
                episodes.append({
                    'title': episode_title,
                    'thunder_url': '',
                    'http_url': http_url,
                    'is_direct_download': True
                })

        logger.info(f"总共找到 {len(episodes)} 个剧集")

        # De-duplicate by title, keeping the first occurrence
        unique_episodes = []
        seen_titles = set()
        for episode in episodes:
            if episode['title'] not in seen_titles:
                unique_episodes.append(episode)
                seen_titles.add(episode['title'])

        logger.info(f"去重后剩余 {len(unique_episodes)} 个剧集")
        return unique_episodes

    except Exception as e:
        logger.error(f"解析页面失败: {e}")
        import traceback
        logger.error(traceback.format_exc())
        return []


def _deliver_episode(episode, show_dir):
    """Fetch one episode dict into show_dir: try a direct HTTP download first,
    otherwise (or on failure) drop a .thunderdl task file for Thunder."""
    title = episode['title']
    # Strip characters that are illegal in Windows file names.
    safe_title = "".join(ch for ch in title if ch not in r'<>:"/\\|?*').strip()

    if episode['is_direct_download']:
        ext = os.path.splitext(urlparse(episode['http_url']).path)[1] or '.mp4'
        target = os.path.join(show_dir, f"{safe_title}{ext}")
        if download_direct_url(episode['http_url'], target):
            logger.info(f"成功下载: {title}")
            return
        # Direct download failed — fall through to a Thunder task file.

    target = os.path.join(show_dir, f"{safe_title}.thunderdl")
    download_with_thunder(episode['thunder_url'], target)


def check_for_updates(feed_config, show_dir):
    """Check one configured show for new episodes and download any found.

    Parses the show's page, skips episodes already present in show_dir,
    then fetches the remainder. Returns True if at least one new episode
    was found.
    """
    episodes = parse_aidytt_page(feed_config['url'])

    if not episodes:
        logger.warning(f"未找到任何剧集下载链接")
        return False

    new_episodes = []
    for episode in episodes:
        title = episode['title']
        if is_file_downloaded(show_dir, title):
            logger.info(f"剧集已存在: {title}")
        else:
            logger.info(f"发现新剧集: {title}")
            new_episodes.append(episode)

    # Sort alphabetically by title so episodes are fetched in a stable order.
    new_episodes.sort(key=lambda ep: ep['title'])

    for episode in new_episodes:
        _deliver_episode(episode, show_dir)

    return len(new_episodes) > 0


def main():
    """Entry point: load the config and check every configured show for updates."""
    logger.info("=== 电影天堂剧集追更检查开始 ===")

    config = load_config()
    if not config:
        return

    base_dir = config['download_base_dir']
    os.makedirs(base_dir, exist_ok=True)

    for feed in config['rss_feeds']:
        name = feed['name']
        logger.info(f"正在检查剧集: {name}")

        show_dir = ensure_download_dir(name, base_dir)
        if not show_dir:
            continue

        try:
            updated = check_for_updates(feed, show_dir)
            if updated:
                logger.info(f"剧集 '{name}' 有新内容已下载")
            else:
                logger.info(f"剧集 '{name}' 暂无更新")
        except Exception as exc:
            # Keep going: a failure on one show must not abort the others.
            logger.error(f"检查剧集 '{name}' 时出错: {exc}")
            import traceback
            logger.error(traceback.format_exc())

    logger.info("=== 剧集追更检查结束 ===\n")


if __name__ == "__main__":
    main()