# -*- coding: utf-8 -*-

import asyncio
import aiohttp
import aiofiles
import time
import threading
import requests
import os
import hashlib
import random
import subprocess
import base64
import csv
from pathlib import Path
from urllib.parse import urlparse
from scrapy import signals
from scrapy.exceptions import DropItem
from itemadapter import ItemAdapter
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed

from fire_control_spider.items import WebPageItem, MediaItem
from fire_control_spider.utils import FileUtils, ContentProcessor


class AsyncFileDownloader:
    """Asynchronous file downloader.

    A background dispatcher thread drains a FIFO task queue and hands each
    task to a thread pool, so up to ``max_workers`` downloads run
    concurrently without blocking the Scrapy crawl.  Handles plain HTTP(S)
    resources, inline ``data:`` URLs, and YouTube / Brightcove videos (via
    the ``yt-dlp`` CLI).  Files are content-addressed by the SHA-256 of
    their source URL; downloaded image/video URLs are appended to rotating
    CSV index files (100,000 rows per file).
    """

    # Desktop browser User-Agents; one is picked at random per request to
    # reduce the chance of being blocked by anti-bot filters.
    _USER_AGENTS = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:89.0) Gecko/20100101 Firefox/89.0',
    ]

    # Rows per CSV index file before rotating to a new one.
    _CSV_ROTATE_ROWS = 100000

    # Fallback extension per media type when Content-Type inference fails.
    _MEDIA_DEFAULT_EXT = {'image': '.jpg', 'video': '.mp4', 'audio': '.mp3'}

    def __init__(self, output_dir: str, download_delay_range=(0.1, 0.5), max_workers=5):
        """
        Args:
            output_dir: root directory for downloaded files.
            download_delay_range: (min, max) seconds of random delay before
                each network download, to look less like a bot.
            max_workers: maximum number of concurrent downloads.
        """
        self.output_dir = Path(output_dir)
        self.download_delay_range = download_delay_range
        self.max_workers = max_workers
        self.download_queue = []       # pending (url, file_path, media_type, record_type, site_dir, callback)
        self.download_thread = None    # dispatcher thread, created by start_download_thread()
        self.running = True            # cleared by stop() to shut the dispatcher down
        self.lock = threading.Lock()   # guards download_queue
        self.downloaded_files = {}     # sha256 -> local_path (reserved for dedup bookkeeping)
        self.logger = logging.getLogger(__name__)

    def start_download_thread(self):
        """Start the background dispatcher thread (daemon: never blocks exit)."""
        self.download_thread = threading.Thread(target=self._download_worker)
        self.download_thread.daemon = True
        self.download_thread.start()
        print("🚀 异步文件下载器已启动")

    def _download_worker(self):
        """Dispatcher loop: pop queued tasks and submit them to the thread pool.

        FIX: the original popped tasks and ran them inline on this single
        thread, so the ThreadPoolExecutor was never actually used and
        ``max_workers`` had no effect.  Tasks are now submitted to the pool;
        the ``with`` block waits for in-flight downloads on shutdown.
        """
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            while self.running:
                with self.lock:
                    download_task = self.download_queue.pop(0) if self.download_queue else None

                if download_task is None:
                    time.sleep(0.1)  # idle: avoid busy-spinning on an empty queue
                else:
                    executor.submit(self._run_download_task, download_task)

    def _run_download_task(self, download_task):
        """Execute one queued download (in a pool thread) and invoke its callback."""
        url, file_path, media_type, record_type, site_dir, callback = download_task
        try:
            print(f"📥 开始异步下载: {url}")
            success = self._download_file_sync(url, file_path, media_type, record_type, site_dir)
            if callback:
                callback(success, url, file_path)
        except Exception as e:
            print(f"❌ 下载失败: {url} - {e}")
            if callback:
                callback(False, url, file_path)

    def _download_file_sync(self, url, file_path, media_type, record_type, site_dir):
        """Download one file synchronously (runs inside a pool thread).

        Special URL kinds (``data:``, YouTube, Brightcove) are delegated to
        dedicated handlers before the generic HTTP path.

        Returns:
            True on success or if the target file already exists; False on failure.
        """
        try:
            if url.startswith('data:'):
                return self._download_data_url_sync(url, file_path, media_type, record_type, site_dir)
            if 'youtube.com/watch' in url:
                return self._download_with_ytdlp(url, file_path, record_type, site_dir, 'YouTube视频')
            if 'brightcove.net' in url:
                return self._download_with_ytdlp(url, file_path, record_type, site_dir, 'Brightcove视频')

            # Random anti-detection delay.  FIX: the original slept here AND
            # again just before the non-iaff request, doubling the delay.
            time.sleep(random.uniform(*self.download_delay_range))

            # Content-addressed filename: sha256 of the cleaned URL.
            clean_url = str(url).strip()
            file_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()

            parsed_url = urlparse(url)
            # FIX: use Path.suffix instead of the naive "split('.')[-1]" which
            # produced garbage extensions for paths like "/download.php/file".
            file_extension = Path(parsed_url.path).suffix.lower()
            if not file_extension:
                # No extension in the URL path: infer one from Content-Type.
                file_extension = self._get_extension_from_content_type(url, media_type)

            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            full_file_path = Path(file_path).parent / f"{file_hash}{file_extension}"

            if full_file_path.exists():
                print(f"文件已存在: {full_file_path}")
                return True

            # Session keeps the connection alive; randomized UA per download.
            headers = self._get_headers_for_media_type(media_type, parsed_url)
            session = requests.Session()
            session.headers.update(headers)
            session.headers['User-Agent'] = random.choice(self._USER_AGENTS)

            if 'iaff.org' in url:
                # IAFF blocks requests; use curl first, fall back to requests.
                self._download_with_curl_fallback(url, full_file_path, media_type, parsed_url, session)
            else:
                response = session.get(url, timeout=30)
                response.raise_for_status()
                full_file_path.write_bytes(response.content)

            print(f"下载{media_type}: {url} -> {full_file_path}")

            if record_type in ("image", "video"):
                self._record_media_url(file_hash, url, site_dir, file_extension, record_type)
            return True

        except Exception as e:
            self.logger.warning(f"下载{media_type}失败: {e}")
            return False

    def _download_with_curl_fallback(self, url, full_file_path, media_type, parsed_url, session):
        """Fetch ``url`` with the curl CLI, falling back to the requests session.

        Raises if both strategies fail (the caller's except handles it).
        """
        curl_cmd = [
            'curl', '-L', '-o', str(full_file_path),
            '-H', 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            '-H', f'Referer: {parsed_url.scheme}://{parsed_url.netloc}/',
            '--connect-timeout', '30',
            '--max-time', '60',
            url,
        ]
        try:
            print(f"执行curl命令: {' '.join(curl_cmd)}")
            result = subprocess.run(curl_cmd, capture_output=True, text=True)

            if result.returncode != 0 or not full_file_path.exists():
                print(f"curl下载失败: {result.stderr}")
                raise Exception(f"curl下载失败: {result.stderr}")
            print(f"使用curl下载{media_type}成功: {url} -> {full_file_path}")
        except Exception as e:
            self.logger.warning(f"curl下载{media_type}失败: {e}")
            print(f"回退到requests下载: {url}")
            response = session.get(url, timeout=30)
            response.raise_for_status()
            full_file_path.write_bytes(response.content)

    def _download_data_url_sync(self, data_url, file_path, media_type, record_type, site_dir):
        """Decode an inline ``data:`` URL and persist its payload to disk."""
        try:
            if ',' not in data_url:
                self.logger.warning(f"无效的data:URL格式: {data_url}")
                return False

            header, data = data_url.split(',', 1)

            # Parse "data:<mime>[;base64]".  FIX: the original only parsed the
            # MIME type when a ';' was present, so "data:image/png,..." fell
            # back to application/octet-stream.
            meta = header[5:] if header.startswith('data:') else header
            parts = meta.split(';')
            mime_type = parts[0] or 'application/octet-stream'
            encoding = 'base64' if 'base64' in parts[1:] else None

            file_extension = self._get_extension_from_mime_type(mime_type)
            file_data = base64.b64decode(data) if encoding == 'base64' else data.encode('utf-8')

            # Content-addressed filename: sha256 of the whole data URL.
            clean_url = str(data_url).strip()
            file_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()
            full_file_path = Path(file_path).parent / f"{file_hash}{file_extension}"

            os.makedirs(os.path.dirname(full_file_path), exist_ok=True)

            if full_file_path.exists():
                print(f"data:文件已存在: {full_file_path}")
                return True

            full_file_path.write_bytes(file_data)
            print(f"下载data:{media_type}: {data_url[:50]}... -> {full_file_path}")

            if record_type in ("image", "video"):
                self._record_media_url(file_hash, data_url, site_dir, file_extension, record_type)
            return True

        except Exception as e:
            self.logger.warning(f"下载data:{media_type}失败: {e}")
            return False

    def _download_with_ytdlp(self, video_url, file_path, record_type, site_dir, label):
        """Download a streaming video via the yt-dlp CLI.

        Replaces the two copy-paste YouTube/Brightcove methods; ``label`` is
        the human-readable source name used in log output (e.g. 'YouTube视频').
        """
        try:
            clean_url = str(video_url).strip()
            file_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()

            full_file_path = Path(file_path).parent / f"{file_hash}.mp4"
            os.makedirs(os.path.dirname(full_file_path), exist_ok=True)

            if full_file_path.exists():
                print(f"{label}已存在: {full_file_path}")
                return True

            time.sleep(random.uniform(*self.download_delay_range))

            yt_dlp_cmd = [
                'yt-dlp',
                '--format', 'best[ext=mp4]/best',
                '--output', str(full_file_path),
                '--no-playlist',
                video_url,
            ]
            print(f"开始下载{label}: {video_url}")
            # FIX: bound the subprocess so a stalled download cannot hang a
            # pool thread forever (TimeoutExpired is caught below).
            result = subprocess.run(yt_dlp_cmd, capture_output=True, text=True, timeout=600)

            if result.returncode == 0 and full_file_path.exists():
                print(f"✅ {label}下载成功: {video_url} -> {full_file_path}")
                if record_type in ("image", "video"):
                    self._record_media_url(file_hash, video_url, site_dir, '.mp4', record_type)
                return True

            print(f"❌ {label}下载失败")
            self.logger.warning(f"{label}下载失败: {video_url}")
            return False

        except Exception as e:
            self.logger.warning(f"下载{label}失败: {e}")
            print(f"❌ {label}下载异常: {video_url} - {e}")
            return False

    def _download_youtube_video_sync(self, youtube_url, file_path, media_type, record_type, site_dir):
        """Backward-compatible wrapper: download a YouTube video via yt-dlp."""
        return self._download_with_ytdlp(youtube_url, file_path, record_type, site_dir, 'YouTube视频')

    def _download_brightcove_video_sync(self, brightcove_url, file_path, media_type, record_type, site_dir):
        """Backward-compatible wrapper: download a Brightcove video via yt-dlp."""
        return self._download_with_ytdlp(brightcove_url, file_path, record_type, site_dir, 'Brightcove视频')

    def _get_extension_from_mime_type(self, mime_type: str) -> str:
        """Map a MIME type to a file extension; unknown types get '.bin'."""
        mime_to_ext = {
            'image/jpeg': '.jpg',
            'image/jpg': '.jpg',
            'image/png': '.png',
            'image/gif': '.gif',
            'image/webp': '.webp',
            'image/svg+xml': '.svg',
            'video/mp4': '.mp4',
            'video/webm': '.webm',
            'video/ogg': '.ogg',
            'audio/mpeg': '.mp3',
            'audio/wav': '.wav',
            'audio/ogg': '.ogg',
            'application/pdf': '.pdf',
            'text/plain': '.txt',
            'application/json': '.json',
            'application/xml': '.xml',
        }
        return mime_to_ext.get(mime_type, '.bin')

    def _get_extension_from_content_type(self, file_url: str, media_type: str) -> str:
        """Infer a file extension by HEAD-probing the URL's Content-Type.

        Falls back to a per-media-type default ('.jpg'/'.mp4'/'.mp3', else
        '.pdf') when the request fails or the type is unrecognized.
        """
        default_ext = self._MEDIA_DEFAULT_EXT.get(media_type, '.pdf')
        try:
            parsed_url = urlparse(file_url)
            headers = self._get_headers_for_media_type(media_type, parsed_url)
            headers['User-Agent'] = random.choice(self._USER_AGENTS)

            head_response = requests.head(file_url, headers=headers, timeout=10)
            content_type = head_response.headers.get('content-type', '')

            # Token tables per media type; first match wins.
            if media_type == "image":
                candidates = (('jpeg', '.jpg'), ('jpg', '.jpg'), ('png', '.png'),
                              ('gif', '.gif'), ('webp', '.webp'))
            elif media_type == "video":
                candidates = (('mp4', '.mp4'), ('avi', '.avi'), ('mov', '.mov'), ('wmv', '.wmv'))
            elif media_type == "audio":
                candidates = (('mp3', '.mp3'), ('wav', '.wav'), ('ogg', '.ogg'), ('aac', '.aac'))
            else:  # main_file, attachment_file
                candidates = (('pdf', '.pdf'), ('image', '.jpg'), ('video', '.mp4'), ('audio', '.mp3'))

            for token, ext in candidates:
                if token in content_type:
                    return ext
            return default_ext
        except Exception:
            # FIX: was a bare "except:" which also swallowed SystemExit /
            # KeyboardInterrupt; network failures fall back to the default.
            return default_ext

    def _get_headers_for_media_type(self, media_type: str, parsed_url) -> dict:
        """Build browser-like request headers appropriate for the media type."""
        base_headers = {
            'Accept-Language': 'en-US,en;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Referer': f'{parsed_url.scheme}://{parsed_url.netloc}/',
            'Cache-Control': 'no-cache',
            'Pragma': 'no-cache',
            'DNT': '1',
            'Upgrade-Insecure-Requests': '1',
        }

        if media_type == "image":
            base_headers.update({
                'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
                'Sec-Fetch-Dest': 'image',
                'Sec-Fetch-Mode': 'no-cors',
                'Sec-Fetch-Site': 'same-origin',
                'Sec-Fetch-User': '?1',
            })
        elif media_type == "video":
            base_headers.update({
                'Accept': 'video/webm,video/ogg,video/*;q=0.9,application/ogg;q=0.7,audio/*;q=0.6,*/*;q=0.5',
                'Sec-Fetch-Dest': 'video',
                'Sec-Fetch-Mode': 'no-cors',
                'Sec-Fetch-Site': 'same-origin',
                'Sec-Fetch-User': '?1',
            })
        elif media_type == "audio":
            base_headers.update({
                'Accept': 'audio/webm,audio/ogg,audio/*;q=0.9,application/ogg;q=0.7,*/*;q=0.5',
                'Sec-Fetch-Dest': 'audio',
                'Sec-Fetch-Mode': 'no-cors',
                'Sec-Fetch-Site': 'same-origin',
            })
        else:  # main_file, attachment_file: pretend to be a page navigation
            base_headers.update({
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                'Sec-Fetch-Dest': 'document',
                'Sec-Fetch-Mode': 'navigate',
                'Sec-Fetch-Site': 'same-origin',
            })

        return base_headers

    def _record_media_url(self, file_hash: str, file_url: str, site_dir: Path, file_extension: str, record_type: str):
        """Append a downloaded media URL to the per-site CSV index."""
        if record_type in ("image", "video"):
            self._record_url_to_csv(record_type, file_hash, file_url, site_dir, file_extension)

    def _record_url_to_csv(self, kind: str, file_hash: str, file_url: str, site_dir: Path, file_extension: str):
        """Append ``(hash+ext, url, relative_path)`` to the current CSV index.

        Replaces the two copy-paste image/video recorders.  Index files live
        in ``<site_dir>/<kind>_urls/`` and rotate every 100,000 rows; names
        encode the row range, e.g. ``image_urls_000001_100000.csv``.
        """
        try:
            urls_dir = site_dir / f"{kind}_urls"
            urls_dir.mkdir(parents=True, exist_ok=True)

            relative_path = f"{kind}/{file_hash}{file_extension}"
            csv_file = self._select_csv_file(urls_dir, kind)

            with open(csv_file, 'a', newline='', encoding='utf-8') as f:
                csv.writer(f).writerow([f"{file_hash}{file_extension}", file_url, relative_path])
        except Exception as e:
            # FIX: was a silent "pass" — at least leave a trace of the lost record.
            self.logger.warning(f"记录{kind} URL失败: {file_url} - {e}")

    def _select_csv_file(self, urls_dir: Path, kind: str) -> Path:
        """Pick the CSV file to append to: the newest one if it still has room,
        otherwise the next file in the rotation sequence."""
        csv_files = sorted(urls_dir.glob(f"{kind}_urls_*.csv"))
        if not csv_files:
            return urls_dir / f"{kind}_urls_000001_100000.csv"

        last_file = csv_files[-1]
        with open(last_file, 'r', encoding='utf-8') as f:
            line_count = sum(1 for _ in f)
        if line_count < self._CSV_ROTATE_ROWS:
            return last_file

        file_num = len(csv_files) + 1
        start_range = (file_num - 1) * self._CSV_ROTATE_ROWS + 1
        end_range = file_num * self._CSV_ROTATE_ROWS
        return urls_dir / f"{kind}_urls_{start_range:06d}_{end_range:06d}.csv"

    def _record_image_url(self, image_hash: str, image_url: str, site_dir: Path, file_extension: str):
        """Backward-compatible wrapper around the generic CSV recorder."""
        self._record_url_to_csv("image", image_hash, image_url, site_dir, file_extension)

    def _record_video_url(self, video_hash: str, video_url: str, site_dir: Path, file_extension: str):
        """Backward-compatible wrapper around the generic CSV recorder."""
        self._record_url_to_csv("video", video_hash, video_url, site_dir, file_extension)

    def add_download_task(self, url, file_path, media_type, record_type, site_dir, callback=None):
        """Queue a download; the dispatcher thread picks it up asynchronously.

        ``callback`` (if given) is invoked as ``callback(success, url, file_path)``.
        """
        with self.lock:
            self.download_queue.append((url, file_path, media_type, record_type, site_dir, callback))
        print(f"📋 添加下载任务: {url}")

    def stop(self):
        """Signal the dispatcher to exit and wait briefly for it to finish.

        FIX: join with a timeout so shutdown cannot hang forever on a stuck
        download (the thread is a daemon, so it won't block interpreter exit).
        """
        self.running = False
        if self.download_thread:
            self.download_thread.join(timeout=30)


class AsyncMediaDownloadPipeline:
    """异步媒体文件下载Pipeline - 不阻塞爬虫"""
    
    def __init__(self, output_dir: str, download_delay_range=(0.1, 0.5), max_workers=5):
        self.output_dir = Path(output_dir)
        self.download_delay_range = download_delay_range
        self.max_workers = max_workers
        self.downloader = None
        self.download_stats = {
            'total_files': 0,
            'successful_downloads': 0,
            'failed_downloads': 0
        }
    
    @classmethod
    def from_crawler(cls, crawler):
        # 从配置中获取延迟设置，默认为(0.1, 0.5)
        download_delay_range = crawler.settings.get('DOWNLOAD_DELAY_RANGE', (0.1, 0.5))
        # 从配置中获取并发数，默认为5
        max_workers = crawler.settings.get('MEDIA_DOWNLOAD_MAX_WORKERS', 5)
        return cls(
            output_dir=crawler.settings.get("OUTPUT_DIR", "outputs"),
            download_delay_range=download_delay_range,
            max_workers=max_workers
        )
    
    def open_spider(self, spider):
        """Spider-open hook: spin up the shared background downloader."""
        downloader = AsyncFileDownloader(
            self.output_dir, self.download_delay_range, self.max_workers)
        downloader.start_download_thread()
        self.downloader = downloader
        print("🔧 异步下载管道已初始化")
    
    def close_spider(self, spider):
        """关闭爬虫时调用"""
        if self.downloader:
            self.downloader.stop()
        print("🔧 异步下载管道已关闭")
    
    def process_item(self, item, spider):
        """Queue media/file downloads for a scraped page without blocking the crawl.

        Non-WebPageItem items pass through untouched.  Main/attachment file
        entries are updated in place with the local path they will be saved to.
        """
        if not isinstance(item, WebPageItem):
            return item

        adapter = ItemAdapter(item)

        # Resolve the site directory from the spider's configured site name.
        site_dir = FileUtils.create_directories(
            self.output_dir, self._resolve_site_name(spider))

        # Plain media lists: fire-and-forget queue entries.
        for field, media_type in (('images', 'image'),
                                  ('videos', 'video'),
                                  ('audios', 'audio')):
            self._download_media_files(adapter.get(field, []), site_dir, media_type, media_type)

        # Main documents and attachments: each file_info dict is updated with
        # the site-relative path the downloaded file will live at.
        for field, media_type in (('main_files', 'main_file'),
                                  ('attachment_files', 'attachment_file')):
            for file_info in adapter.get(field, []):
                updated_file_info = self._download_media_file_and_update_info(
                    file_info["full_url"], site_dir, media_type, file_info)
                if updated_file_info:
                    file_info.update(updated_file_info)

        return item

    def _resolve_site_name(self, spider):
        """Map the spider name to its configured site_name (fallback: the spider name)."""
        from fire_control_spider.config import create_default_config
        for spider_config in create_default_config()['spiders']:
            if spider_config['name'] == spider.name:
                # Empty/None site_name also falls back to the spider name.
                return spider_config['site_name'] or spider.name
        return spider.name
    
    def _download_media_files(self, urls: list, site_dir: Path, media_type: str, record_type: str):
        """批量下载媒体文件 - 使用异步下载"""
        if not urls:
            return
        
        for url in urls:
            # 创建目录结构
            file_subdir = site_dir / media_type
            file_subdir.mkdir(parents=True, exist_ok=True)
            
            # 文件路径
            file_path = file_subdir / "temp"
            
            # 添加下载任务到队列
            self.downloader.add_download_task(
                url, 
                str(file_path),
                media_type,
                record_type,
                site_dir,
                callback=self._download_callback
            )
            
            # 更新统计
            self.download_stats['total_files'] += 1
        
        print(f"📋 异步管道发现 {len(urls)} 个{media_type}文件需要下载")
    
    def _download_media_file_and_update_info(self, file_url: str, site_dir: Path, media_type: str, file_info: dict) -> dict:
        """下载媒体文件并更新file_info"""
        # 处理data:资源
        if file_url.startswith('data:'):
            return self._download_data_url_and_update_info(file_url, site_dir, media_type, file_info)
        
        # 处理YouTube视频
        if 'youtube.com/watch' in file_url:
            return self._download_youtube_video_and_update_info(file_url, site_dir, media_type, file_info)
        
        # 处理Brightcove视频
        if 'brightcove.net' in file_url:
            return self._download_brightcove_video_and_update_info(file_url, site_dir, media_type, file_info)
        
        try:
            # 计算sha256哈希
            clean_url = str(file_url).strip()
            file_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()
            
            # 获取文件扩展名
            parsed_url = urlparse(file_url)
            path = parsed_url.path
            file_extension = ''
            
            # 从URL路径中提取扩展名
            if '.' in path:
                file_extension = '.' + path.split('.')[-1].lower()
            
            # 如果没有扩展名，尝试从Content-Type推断
            if not file_extension:
                file_extension = self.downloader._get_extension_from_content_type(file_url, media_type)
            
            # 创建目录结构
            file_subdir = site_dir / media_type
            file_subdir.mkdir(parents=True, exist_ok=True)
            
            # 文件路径
            file_filename = f"{file_hash}{file_extension}"
            file_path = file_subdir / file_filename
            
            if file_path.exists():
                # 文件已存在，返回更新后的信息
                return {
                    'name': file_info.get('name', ''),
                    'path': f"{media_type}/{file_filename}"
                }
            
            # 添加下载任务到队列
            self.downloader.add_download_task(
                file_url, 
                str(file_path),
                media_type,
                "file",
                site_dir,
                callback=self._download_callback
            )
            
            # 返回更新后的file_info
            return {
                'name': file_info.get('name', ''),
                'path': f"{media_type}/{file_filename}"
            }
                
        except Exception as e:
            self.logger.warning(f"下载{media_type}失败: {e}")
            return None
    
    def _download_data_url_and_update_info(self, data_url: str, site_dir: Path, media_type: str, file_info: dict) -> dict:
        """下载data:URL资源并更新file_info"""
        try:
            # 解析data:URL
            if ',' not in data_url:
                self.logger.warning(f"无效的data:URL格式: {data_url}")
                return None
            
            header, data = data_url.split(',', 1)
            
            # 提取MIME类型和编码
            mime_type = 'application/octet-stream'
            encoding = None
            
            if ';' in header:
                parts = header.split(';')
                mime_type = parts[0].replace('data:', '')
                if 'base64' in parts:
                    encoding = 'base64'
            
            # 根据MIME类型确定扩展名
            file_extension = self.downloader._get_extension_from_mime_type(mime_type)
            
            # 解码数据
            if encoding == 'base64':
                file_data = base64.b64decode(data)
            else:
                file_data = data.encode('utf-8')
            
            # 使用SHA256哈希作为文件名
            clean_url = str(data_url).strip()
            file_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()
            file_filename = f"{file_hash}{file_extension}"
            file_path = site_dir / media_type / file_filename
            
            # 确保目录存在
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            
            # 写入文件
            with open(file_path, 'wb') as f:
                f.write(file_data)
            
            print(f"下载data:{media_type}: {data_url[:50]}... -> {file_path}")
            
            # 返回更新后的file_info
            return {
                'name': file_info.get('name', ''),
                'path': f"{media_type}/{file_filename}"
            }
                
        except Exception as e:
            self.logger.warning(f"下载data:{media_type}失败: {e}")
            return None
    
    def _download_youtube_video_and_update_info(self, youtube_url: str, site_dir: Path, media_type: str, file_info: dict) -> dict:
        """下载YouTube视频并更新file_info"""
        try:
            # 计算sha256哈希
            clean_url = str(youtube_url).strip()
            file_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()
            
            # 创建目录结构
            file_subdir = site_dir / media_type
            file_subdir.mkdir(parents=True, exist_ok=True)
            
            # 文件路径
            file_filename = f"{file_hash}.mp4"
            file_path = file_subdir / file_filename
            
            if file_path.exists():
                print(f"YouTube视频已存在: {file_path}")
                return {
                    'name': file_info.get('name', ''),
                    'path': f"{media_type}/{file_filename}"
                }
            
            # 添加下载任务到队列
            self.downloader.add_download_task(
                youtube_url, 
                str(file_path),
                media_type,
                "video",
                site_dir,
                callback=self._download_callback
            )
            
            return {
                'name': file_info.get('name', ''),
                'path': f"{media_type}/{file_filename}"
            }
                
        except Exception as e:
            self.logger.warning(f"下载YouTube视频失败: {e}")
            return None
    
    def _download_brightcove_video_and_update_info(self, brightcove_url: str, site_dir: Path, media_type: str, file_info: dict) -> dict:
        """下载Brightcove视频并更新file_info"""
        try:
            # 计算sha256哈希
            clean_url = str(brightcove_url).strip()
            file_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()
            
            # 创建目录结构
            file_subdir = site_dir / media_type
            file_subdir.mkdir(parents=True, exist_ok=True)
            
            # 文件路径
            file_filename = f"{file_hash}.mp4"
            file_path = file_subdir / file_filename
            
            if file_path.exists():
                print(f"Brightcove视频已存在: {file_path}")
                return {
                    'name': file_info.get('name', ''),
                    'path': f"{media_type}/{file_filename}"
                }
            
            # 添加下载任务到队列
            self.downloader.add_download_task(
                brightcove_url, 
                str(file_path),
                media_type,
                "video",
                site_dir,
                callback=self._download_callback
            )
            
            return {
                'name': file_info.get('name', ''),
                'path': f"{media_type}/{file_filename}"
            }
                
        except Exception as e:
            self.logger.warning(f"下载Brightcove视频失败: {e}")
            return None
    
    def _download_callback(self, success, url, file_path):
        """下载完成回调"""
        if success:
            self.download_stats['successful_downloads'] += 1
            print(f"✅ 异步文件下载成功: {file_path}")
        else:
            self.download_stats['failed_downloads'] += 1
            print(f"❌ 异步文件下载失败: {url}")
        
        print(f"📊 异步下载进度: 成功 {self.download_stats['successful_downloads']}/{self.download_stats['total_files']}") 