import os
import re
import time
from urllib.parse import quote

import requests
from DrissionPage import ChromiumPage, ChromiumOptions
from loguru import logger

# CSS selectors tried in order to locate note cards; the site's markup
# varies between rollouts, so several candidates are kept.
_CARD_SELECTORS = (
    '.note-item',
    '.card-container',
    '[data-redesign="true"] .feed-card',
    'div[data-v-] > div > div > div',
)

# Browser-like headers so the CDN accepts our download requests.
_HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36",
    "Referer": "https://www.xiaohongshu.com/"
}

# In-page script: collect video URLs from <video>/<source> tags, falling
# back to the note's entry in window.__INITIAL_STATE__.
_VIDEO_JS = '''
    try {
        const sources = [];
        const videoElements = document.querySelectorAll('video');

        // 遍历所有 video 元素
        videoElements.forEach(video => {
            console.log("当前 video 元素:", video);
            // 检查 video 的直接 src 属性
            if (video.src && video.src.startsWith('http')) {
                sources.push(video.src);
            }

            // 检查 source 子元素
            const sourceTags = video.querySelectorAll('source');
            sourceTags.forEach(source => {
                if (source.src && source.src.startsWith('http')) {
                    sources.push(source.src);
                }
            });
        });

        // 如果还没找到，尝试从页面状态中提取
        if (sources.length === 0) {
            const appData = window.__INITIAL_STATE__ || {};
            const noteId = window.location.pathname.split('/').pop();

            if (appData.note && appData.note.noteDetailMap &&
                appData.note.noteDetailMap[noteId] &&
                appData.note.noteDetailMap[noteId].note &&
                appData.note.noteDetailMap[noteId].note.video) {

                sources.push(appData.note.noteDetailMap[noteId].note.video.url);
            }
        }

        return sources;
    } catch (e) {
        console.error('视频提取错误:', e);
        return [];
    }
'''


def xhs_image_crawler(keyword: str, max_notes: int = 20):
    """Crawl Xiaohongshu (RED) search results for *keyword* and download
    the images and videos of up to *max_notes* notes.

    Drives a real Chromium browser (DrissionPage), locates note cards with
    a list of fallback selectors, extracts each note's media URLs (from the
    card directly when a data-id is exposed, otherwise by opening the note
    detail page), then downloads everything under
    ``result/Redbook/<timestamp>/<NNN-title>/``.

    :param keyword: search keyword; URL-encoded before use so non-ASCII
        (e.g. Chinese) terms form a valid URL.
    :param max_notes: maximum number of notes to collect.
    :return: a status string (in Chinese) describing success or failure.
    """
    co = ChromiumOptions()
    co.ignore_certificate_errors(True)
    # Hide the automation fingerprint to reduce the chance of being blocked.
    co.set_argument('--disable-blink-features=AutomationControlled')
    co.set_user_agent('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36')

    page = ChromiumPage(co)
    # Fix: URL-encode the keyword — the original interpolated it raw, which
    # breaks for keywords containing '&', '#', spaces, etc.
    search_url = f'https://www.xiaohongshu.com/search_result?keyword={quote(keyword)}'

    try:
        page.get(search_url)

        # Per-run output directory: result/Redbook/<timestamp>/
        timestamp = time.strftime("%Y%m%d_%H%M%S")
        result_dir = os.path.join("result", "Redbook", timestamp)
        os.makedirs(result_dir, exist_ok=True)
        logger.info(f"结果将保存到: {result_dir}")

        # Full-page screenshot kept for debugging selector failures.
        screenshot_path = os.path.join(result_dir, f"debug_{keyword}.png")
        page.get_screenshot(path=screenshot_path, full_page=True)
        logger.info(f"已保存页面截图: {screenshot_path}")

        _wait_for_feed(page)
        time.sleep(5)  # extra settle time for lazy-loaded cards

        note_cards = _find_note_cards(page)
        if not note_cards:
            logger.error("未找到任何笔记卡片! 请检查页面截图和选择器")
            return "失败: 无法定位内容"

        media_data = _collect_notes(page, note_cards, search_url, max_notes)
        if not media_data:
            logger.error("未获取到任何有效笔记数据")
            return "失败: 无有效数据"

        _download_all(media_data, result_dir)
        return f"完成! 成功处理{len(media_data)}篇笔记，结果保存在: {result_dir}"
    finally:
        # Fix: always close the browser — the original leaked the Chromium
        # process whenever an unexpected exception escaped the function.
        page.quit()


def _wait_for_feed(page, timeout: float = 15):
    """Poll for the search feed container; warn (but keep going) if it
    never appears within *timeout* seconds."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if page.ele('.feeds-container', timeout=1):
            return
        time.sleep(0.5)
    logger.warning("未找到 .feeds-container 元素，继续执行")


def _find_note_cards(page):
    """Return the first non-empty card list found by the known selectors,
    or None when every selector comes up empty."""
    for selector in _CARD_SELECTORS:
        cards = page.eles(selector)
        if cards:
            logger.success(f"使用选择器: {selector} 找到 {len(cards)} 张卡片")
            return cards
    return None


def _normalize_url(url: str) -> str:
    """Make protocol-relative or schemeless URLs absolute https:// URLs."""
    if url.startswith('//'):
        return f'https:{url}'
    if not url.startswith('http'):
        return f'https://{url}'
    return url


def _go_back(page, search_url: str):
    """Return to the search results, preferring browser history back()."""
    if hasattr(page, 'back'):
        page.back()
    else:
        page.get(search_url)


def _extract_images(page):
    """Collect image URLs from the currently-open note detail page."""
    images = []
    img_containers = page.eles('.img-container')
    if img_containers:
        for container in img_containers:
            img_tag = container.ele('tag:img', timeout=2)
            if not img_tag:
                continue
            # src may be lazy-loaded; fall back to data-src.
            src = img_tag.attr('src') or img_tag.attr('data-src')
            if src:
                src = _normalize_url(src)
                images.append(src)
                logger.debug(f"从 img-container 获取图片 URL: {src}")
    else:
        logger.warning("未找到.img-container 元素，使用备用方案")
        # Fallback: any CDN-looking <img>, or every <img> as a last resort.
        img_elements = page.eles('img[src*="sns-webpic"]') or page.eles('img')
        for img in img_elements:
            src = img.attr('src') or img.attr('data-src')
            if src:
                images.append(src)
    return images


def _extract_videos(page):
    """Collect video URLs: first from <video> tags on the detail page,
    then via in-page JavaScript (which also checks __INITIAL_STATE__)."""
    videos = []
    try:
        for video in page.eles('tag:video'):
            video_src = video.attr('src')
            if video_src and video_src.startswith('http'):
                videos.append(video_src)
                logger.debug(f"从 video 标签获取视频 URL: {video_src}")

        if not videos:
            logger.info("尝试通过 JavaScript 提取视频 URL")
            video_data = page.run_js(_VIDEO_JS)
            if video_data:
                videos = [url for url in video_data if url and url.startswith('http')]
                for url in videos:
                    logger.debug(f"通过 JS 获取视频 URL: {url}")

        logger.success(f"找到 {len(videos)} 个视频资源")
    except Exception as e:
        logger.error(f"提取视频时出错: {e}")
        videos = []
    return videos


def _collect_notes(page, note_cards, search_url: str, max_notes: int):
    """Walk the note cards and build the list of media metadata dicts
    (keys: title, note_id, images, videos)."""
    media_data = []
    for card in note_cards:
        if len(media_data) >= max_notes:
            break
        try:
            # Method 1: note id straight from the card's data attributes.
            note_id = card.attr('data-noteid') or card.attr('data-id')
            logger.info(f"方法1： 通过 data-id 获取到笔记 ID: {note_id}")

            if note_id:
                # Fast path: parse the preview data exposed on the card.
                title_ele = card.ele('.title')
                title = title_ele.text if title_ele else f"笔记_{note_id}"
                preview_imgs = card.eles('img[src*="sns-img"]')
                images = [img.attr('src') for img in preview_imgs] if preview_imgs else []
                media_data.append({
                    "title": title,
                    "note_id": note_id,
                    "images": images,
                    "videos": []  # video URLs are not exposed on the card
                })
                logger.info(f"快速获取: {title[:15]}... ({len(images)}张预览图)")
                continue

            # Method 2: open the detail page and read the id from its URL.
            card.click()
            time.sleep(3)  # wait for the detail page to load
            current_url = page.url
            if '/explore/' not in current_url:
                logger.warning(f"非常规 URL: {current_url}")
                _go_back(page, search_url)
                continue
            note_id = current_url.split('/')[-1]
            logger.info(f"方法2： 通过点击详情页获取到笔记 ID: {note_id}")

            title_ele = page.ele('.title', timeout=3) or page.ele('h1', timeout=2)
            title = title_ele.text if title_ele else f"无标题_{time.time()}"
            logger.info(f"获取到标题: {title}")

            images = _extract_images(page)
            videos = _extract_videos(page)

            media_data.append({
                "title": title,
                "note_id": note_id,
                "images": images,
                "videos": videos
            })
            logger.success(f"获取笔记: {title[:15]}... ({len(images)}张图片, {len(videos)}个视频)")

            _go_back(page, search_url)
            time.sleep(2)
        except Exception as e:
            logger.error(f"处理卡片失败: {e}")
            # Get back to a known state before trying the next card.
            if '/search_result' not in page.url:
                page.get(search_url)
                time.sleep(3)
            continue
    return media_data


def _download_all(media_data, result_dir: str):
    """Download every image and video into per-note subdirectories of
    *result_dir* (``NNN-<sanitized title>``)."""
    for idx, note in enumerate(media_data):
        # Strip filesystem-unsafe characters and cap the directory name.
        clean_title = re.sub(r'[\\/*?:"<>|]', "", note['title'])
        dir_name = f"{idx + 1:03d}-{clean_title[:20]}"[:50]
        note_dir = os.path.join(result_dir, dir_name)
        os.makedirs(note_dir, exist_ok=True)

        for img_idx, img_url in enumerate(note["images"]):
            try:
                img_url = _normalize_url(img_url)
                # Keep the original extension when recognizable; default jpg.
                ext_match = re.search(r'\.(jpe?g|png|webp)', img_url, re.I)
                ext = f".{ext_match.group(1)}" if ext_match else ".jpg"

                response = requests.get(img_url, headers=_HEADERS, timeout=15)
                if response.status_code == 200:
                    with open(f"{note_dir}/{img_idx + 1:02d}{ext}", "wb") as f:
                        f.write(response.content)
                    logger.success(f"下载: {dir_name}/{img_idx + 1:02d}{ext}")
                else:
                    logger.warning(f"下载失败: HTTP {response.status_code} - {img_url}")

                time.sleep(1)  # be polite to the CDN
            except Exception as e:
                logger.error(f"下载异常: {img_url} - {e}")

        for vid_idx, video_url in enumerate(note["videos"]):
            try:
                video_url = _normalize_url(video_url)
                ext_match = re.search(r'\.(mp4|mov|webm|avi)', video_url, re.I)
                ext = f".{ext_match.group(1)}" if ext_match else ".mp4"

                # Videos are larger: longer timeout and a longer pause after.
                response = requests.get(video_url, headers=_HEADERS, timeout=30)
                if response.status_code == 200:
                    video_path = f"{note_dir}/video_{vid_idx + 1:02d}{ext}"
                    with open(video_path, "wb") as f:
                        f.write(response.content)
                    logger.success(f"下载视频: {video_path}")
                else:
                    logger.warning(f"视频下载失败: HTTP {response.status_code} - {video_url}")

                time.sleep(3)
            except Exception as e:
                logger.error(f"视频下载异常: {video_url} - {e}")
