import os
import re
import requests
from concurrent.futures import ThreadPoolExecutor
from urllib.parse import urlparse
from pathlib import Path
from lxml import html
import time

def clean_filename(filename):
    """Sanitize *filename* so it is a valid Windows/Unix file name.

    Colons become ' __', other forbidden characters are dropped, and
    leading/trailing dots and spaces are trimmed. An empty result falls
    back to a default name.
    """
    # Colons get a visible substitute instead of silent removal.
    sanitized = filename.replace(':', ' __')
    # Strip the remaining characters Windows forbids in file names.
    sanitized = re.sub(r'[<>:"/\\|?*]', '', sanitized)
    # Trailing/leading dots and spaces are also invalid on Windows.
    sanitized = sanitized.strip('. ')
    # Guarantee a non-empty name.
    return sanitized if sanitized else "unnamed_podcast"

def extract_number_from_filename(filename):
    """Return the episode number from names like 'TPP123-converted.mp3'.

    Returns the digits as a string, or None when the pattern is absent.
    """
    found = re.search(r'TPP(\d+)-converted', filename)
    return found.group(1) if found else None

def _find_podcast_containers(tree):
    """Try several XPath strategies in order and return the first non-empty hit.

    Mirrors the original 4-step fallback: episode-item divs, episode divs,
    <article> tags, then any div wrapping an episode-title span.
    """
    strategies = [
        ('方法1', '//div[contains(@class, "episode-item")]'),
        ('方法2', '//div[contains(@class, "episode")]'),
        ('方法3', '//article'),
        ('方法4', '//div[.//span[contains(@class, "cp-playlist-episode-title")]]'),
    ]
    for label, xpath in strategies:
        containers = tree.xpath(xpath)
        print(f"{label}找到 {len(containers)} 个播客条目")
        if containers:
            return containers
    return []

def _entry_from_parts(title, download_link, duration="未知时长"):
    """Build one episode dict from its parts, or return None.

    Returns None (after logging) when no episode number can be extracted
    from the download link's filename.
    """
    filename = os.path.basename(urlparse(download_link).path)
    number = extract_number_from_filename(filename)
    if not number:
        # Fixed: the original printed the literal text "(unknown)" here
        # because the f-string had no placeholder.
        print(f"无法从文件名中提取数字: {filename}")
        return None
    return {
        'title': title,
        'url': download_link,
        'duration': duration,
        'number': number,
    }

def get_podcast_info(url):
    """从网页获取播客信息.

    Fetches *url*, saves the raw HTML to response.html for debugging,
    then parses episode entries. Returns a list of dicts with keys
    'title', 'url', 'duration', 'number'; returns [] on any failure.
    """
    try:
        # Browser-like headers — some hosts reject the default UA.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
        }

        print(f"正在请求URL: {url}")
        response = requests.get(url, headers=headers, timeout=10)
        print(f"响应状态码: {response.status_code}")

        if response.status_code != 200:
            print(f"请求失败，状态码: {response.status_code}")
            return []

        # Keep a copy of the raw page for offline inspection.
        with open('response.html', 'w', encoding='utf-8') as f:
            f.write(response.text)
        print("已将响应内容保存到 response.html")

        tree = html.fromstring(response.content)
        podcast_containers = _find_podcast_containers(tree)

        if not podcast_containers:
            print("未找到任何播客条目，尝试直接获取标题和链接")
            # Fallback: scrape titles and .mp3 links independently and pair
            # them positionally (assumes same document order — best effort).
            titles = tree.xpath('//span[contains(@class, "cp-playlist-episode-title")]/text()')
            links = tree.xpath('//a[contains(@href, ".mp3")]/@href')
            print(f"找到 {len(titles)} 个标题和 {len(links)} 个链接")

            if titles and links:
                podcast_info = []
                for title, download_link in zip(titles, links):
                    try:
                        entry = _entry_from_parts(title.strip(), download_link)
                        if entry is None:
                            continue
                        podcast_info.append(entry)
                        print(f"成功解析: {entry['number']} - {entry['title']} - {entry['url']}")
                    except Exception as e:
                        print(f"处理条目时出错: {str(e)}")
                        continue
                return podcast_info

        podcast_info = []
        for container in podcast_containers:
            try:
                # Dump a snippet of each container to aid selector debugging.
                print("\n处理容器:")
                print(html.tostring(container, encoding='unicode')[:200])

                title = container.xpath('.//span[contains(@class, "cp-playlist-episode-title")]/text()')
                if not title:
                    continue

                download_link = container.xpath('.//a[contains(@href, ".mp3")]/@href')
                if not download_link:
                    continue

                duration = container.xpath('.//span[contains(@class, "duration")]/text()')
                duration_text = duration[0].strip() if duration else "未知时长"

                entry = _entry_from_parts(title[0].strip(), download_link[0], duration_text)
                if entry is None:
                    continue
                podcast_info.append(entry)
                print(f"成功解析: {entry['number']} - {entry['title']} - {entry['url']}")

            except (IndexError, AttributeError) as e:
                print(f"解析播客条目时出错: {str(e)}")
                continue

        return podcast_info
    except Exception as e:
        # Top-level guard: any unexpected failure degrades to "no episodes".
        print(f"获取播客信息时出错: {str(e)}")
        return []

def download_and_rename(url, podcast_name, number):
    """下载并重命名播客文件.

    Downloads *url* and saves it as '<number>_<sanitized podcast_name>.mp3'
    in the current working directory. Skips the download if the target
    file already exists. All errors are logged, never raised.
    """
    tmp_filename = None
    try:
        # Sanitize the episode title for use in a file name.
        clean_podcast_name = clean_filename(podcast_name)
        new_filename = f"{number}_{clean_podcast_name}.mp3"

        # Skip already-downloaded episodes.
        if os.path.exists(new_filename):
            print(f"文件已存在，跳过下载: {new_filename}")
            return

        # Stream into a temp file first so a failed/interrupted download
        # never leaves a truncated .mp3 that would be skipped next run.
        tmp_filename = new_filename + ".part"
        # timeout prevents a stalled server from hanging a worker thread;
        # the context manager closes the streamed connection.
        with requests.get(url, stream=True, timeout=30) as response:
            response.raise_for_status()
            with open(tmp_filename, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)

        # Atomically move the completed download into place.
        os.replace(tmp_filename, new_filename)
        print(f"成功下载并重命名: {new_filename}")

    except Exception as e:
        # Best-effort cleanup of any partial file.
        if tmp_filename and os.path.exists(tmp_filename):
            try:
                os.remove(tmp_filename)
            except OSError:
                pass
        print(f"处理 {url} 时出错: {str(e)}")

def main():
    """Fetch the episode list, then download every episode via a thread pool."""
    # Source playlist page.
    podcast_url = "https://player.captivate.fm/show/1749da6a-9a89-4f1d-bd30-65eb9a749b60/"

    print("正在获取播客信息...")
    episodes = get_podcast_info(podcast_url)

    # Nothing to do if scraping failed or found no entries.
    if not episodes:
        print("未能获取到播客信息")
        return

    print(f"\n成功获取到 {len(episodes)} 个播客信息")

    # Download into a dedicated directory.
    target_dir = "Podcast_TechPolicy"
    os.makedirs(target_dir, exist_ok=True)
    os.chdir(target_dir)

    # Fan downloads out over a small pool, pacing submissions to avoid
    # hammering the server with simultaneous connection openings.
    with ThreadPoolExecutor(max_workers=5) as pool:
        for episode in episodes:
            pool.submit(download_and_rename, episode['url'], episode['title'], episode['number'])
            time.sleep(0.5)

if __name__ == "__main__":
    main()