#!/usr/bin/env python
# -*- coding: utf-8 -*-

import html
import json
import os
import random
import time
from datetime import datetime

import pandas as pd
import requests
from selenium import webdriver
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from tqdm import tqdm
from webdriver_manager.chrome import ChromeDriverManager


class XiaohongshuScraper:
    """Xiaohongshu (Little Red Book) scraper: searches keywords and extracts post data."""

    def __init__(self, headless=False):
        """Initialize the scraper.

        Args:
            headless (bool, optional): Whether to run Chrome headless. Defaults to False.
        """
        self.base_url = "https://www.xiaohongshu.com"
        self.search_url = f"{self.base_url}/explore"
        self.options = Options()

        if headless:
            self.options.add_argument("--headless")

        self.options.add_argument("--disable-gpu")
        self.options.add_argument("--window-size=1920,1080")
        self.options.add_argument("--no-sandbox")
        self.options.add_argument("--disable-dev-shm-usage")
        # Reduce automation fingerprints so the site is less likely to block us.
        self.options.add_argument("--disable-blink-features=AutomationControlled")
        self.options.add_experimental_option("excludeSwitches", ["enable-automation"])
        self.options.add_experimental_option("useAutomationExtension", False)

        self.driver = None          # created lazily in start_browser()
        self.data = []              # post dicts accumulated across searches
        self.current_keyword = ""   # keyword tagged onto each extracted post
        self.login_url = f"{self.base_url}/login"
        self.is_logged_in = False

    def start_browser(self):
        """Launch Chrome, preferring webdriver-manager with a local-driver fallback."""
        try:
            # First try a driver resolved by WebDriverManager.
            service = Service(ChromeDriverManager().install())
            self.driver = webdriver.Chrome(service=service, options=self.options)
        except Exception as e:
            print(f"使用 WebDriverManager 失败: {str(e)}")
            print("尝试使用本地 ChromeDriver...")

            # Fall back to a chromedriver.exe placed next to this script.
            current_dir = os.path.dirname(os.path.abspath(__file__))
            chrome_driver_path = os.path.join(current_dir, "chromedriver.exe")

            if not os.path.exists(chrome_driver_path):
                print(f"在 {chrome_driver_path} 未找到 chromedriver.exe")
                print("请从 https://chromedriver.chromium.org/downloads 下载与您的 Chrome 浏览器版本匹配的 ChromeDriver")
                print("并将其放在脚本同一目录下")
                raise Exception("ChromeDriver 未找到")

            service = Service(chrome_driver_path)
            self.driver = webdriver.Chrome(service=service, options=self.options)

        # Hide the navigator.webdriver flag used by basic anti-bot checks.
        self.driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")

    def close_browser(self):
        """Quit the browser and drop the driver reference."""
        if self.driver:
            self.driver.quit()
            self.driver = None

    def search_keyword(self, keyword, max_pages=5):
        """Search for a keyword and scrape the result pages.

        Args:
            keyword (str): Keyword to search for.
            max_pages (int, optional): Maximum number of result pages. Defaults to 5.

        Returns:
            pd.DataFrame: Rows scraped for this keyword (also written to a CSV file).
        """
        if not self.driver:
            self.start_browser()

        # Tag extracted posts with this keyword, and remember where its rows
        # start so the returned frame/CSV contains only this keyword's data
        # even when several keywords are searched with one scraper instance.
        self.current_keyword = keyword
        start_index = len(self.data)

        try:
            # Open the search page.
            self.driver.get(self.search_url)
            print(f"正在搜索关键词: {keyword}")

            # Wait for the search box, then type the keyword and submit.
            wait = WebDriverWait(self.driver, 10)
            search_input = wait.until(EC.presence_of_element_located((By.ID, "search-input")))
            search_input.clear()
            search_input.send_keys(keyword)
            search_input.send_keys(Keys.ENTER)

            # Give the results time to render.
            time.sleep(3)

            # Scrape page by page.
            for page in range(max_pages):
                print(f"正在抓取第 {page+1} 页数据...")
                self._extract_posts_from_current_page()

                # Try to advance to the next page.
                try:
                    next_button = self.driver.find_element(By.XPATH, "//div[contains(@class, 'pagination')]/button[text()='下一页']")
                    if not next_button.is_enabled():
                        print("已到达最后一页")
                        break
                    next_button.click()
                    time.sleep(random.uniform(2, 4))  # random pause to look less like a bot
                except NoSuchElementException:
                    print("未找到下一页按钮")
                    break

            # Persist only this keyword's rows.
            df = pd.DataFrame(self.data[start_index:])
            output_file = f"xiaohongshu_{keyword}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
            df.to_csv(output_file, index=False, encoding='utf-8-sig')
            print(f"数据已保存到 {output_file}")

            return df

        except Exception as e:
            print(f"搜索过程中出错: {str(e)}")
            # Return whatever was collected for this keyword before the error.
            return pd.DataFrame(self.data[start_index:])

    def _extract_posts_from_current_page(self):
        """Extract post cards from the current results page into self.data."""
        wait = WebDriverWait(self.driver, 10)

        try:
            # Wait for the post cards to appear.
            post_cards = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "section.note-item")))

            for card in post_cards:
                try:
                    # find_element raises NoSuchElementException rather than
                    # returning None, so each lookup can be used directly.
                    title = card.find_element(By.CSS_SELECTOR, "a.title span").text.strip()
                    author = card.find_element(By.CSS_SELECTOR, "div.author-wrapper .name").text.strip()
                    likes = card.find_element(By.CSS_SELECTOR, "span.like-wrapper .count").text.strip()

                    url = card.find_element(By.CSS_SELECTOR, "a[href^='/search_result/']").get_attribute("href")
                    if url and not url.startswith("http"):
                        url = self.base_url + url

                    self.data.append({
                        "title": title,
                        "author": author,
                        "likes": likes,
                        "url": url,
                        "source_keyword": self.current_keyword,
                        "crawl_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    })

                except NoSuchElementException as e:
                    print(f"提取帖子信息出错: {str(e)}")
                    continue

        except TimeoutException:
            print("等待帖子加载超时")

    def get_post_detail(self, url):
        """Fetch the detail page of a single post.

        Args:
            url (str): Post URL.

        Returns:
            dict | None: Post details, or None when the page could not be parsed.
        """
        if not self.driver:
            self.start_browser()

        try:
            print(f"正在打开链接: {url}")
            self.driver.get(url)
            wait = WebDriverWait(self.driver, 10)

            # The site ships several page layouts, so try a few selectors in turn.
            try:
                content_element = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "div.content")))
            except Exception:
                try:
                    content_element = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "div.note-content")))
                except Exception:
                    content_element = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "div.content-container")))

            # Title (several candidate selectors).
            try:
                title = self.driver.find_element(By.CSS_SELECTOR, "h1.title").text.strip()
            except Exception:
                try:
                    title = self.driver.find_element(By.CSS_SELECTOR, "div.note-title").text.strip()
                except Exception:
                    title = "无标题"

            content = content_element.text.strip() if content_element else "无内容"

            # Author (several candidate selectors).
            try:
                author = self.driver.find_element(By.CSS_SELECTOR, "div.author-name").text.strip()
            except Exception:
                try:
                    author = self.driver.find_element(By.CSS_SELECTOR, "div.user-nickname").text.strip()
                except Exception:
                    author = "未知作者"

            # Publish time (several candidate selectors).
            try:
                publish_time = self.driver.find_element(By.CSS_SELECTOR, "div.publish-time").text.strip()
            except Exception:
                try:
                    publish_time = self.driver.find_element(By.CSS_SELECTOR, "div.note-time").text.strip()
                except Exception:
                    publish_time = "未知时间"

            images = self._extract_images()
            comments = self._extract_comments()

            return {
                "title": title,
                "content": content,
                "author": author,
                "publish_time": publish_time,
                "url": url,
                "images": images,
                "comments": comments,
                "crawl_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            }

        except Exception as e:
            print(f"获取帖子详情出错: {str(e)}")
            return None

    def _extract_images(self):
        """Collect image URLs from the currently open post page."""
        images = []
        try:
            # Candidate selectors for directly visible images.
            selectors = [
                "img.note-image",
                "div.swiper-slide img",
                "div.swiper-slide-active img",
                "div.slide-item img",
                ".image-viewer-container img",
                ".xg-image img"
            ]

            for selector in selectors:
                img_elements = self.driver.find_elements(By.CSS_SELECTOR, selector)
                if img_elements:
                    for img in img_elements:
                        img_url = img.get_attribute("src")
                        if img_url and img_url not in images:
                            images.append(img_url)
                    # Stop at the first selector that yielded images.
                    if images:
                        break

            # Nothing directly visible: try paging through a slide carousel.
            if not images:
                self._extract_images_from_carousel(images)

            print(f"找到 {len(images)} 张图片")
        except Exception as e:
            print(f"提取图片出错: {str(e)}")
        return images

    def _extract_images_from_carousel(self, images):
        """Click through any image carousel, appending new image URLs to *images*."""
        active_selectors = ["div.swiper-slide-active img", "div.active img", ".current-slide img"]

        for swiper_selector in ["div.swiper-container", "div.image-container", "div.slide-container"]:
            try:
                # Only proceed when this carousel container exists (raises otherwise).
                self.driver.find_element(By.CSS_SELECTOR, swiper_selector)

                # Locate a "next slide" button.
                next_button = None
                for button_selector in ["div.swiper-button-next", ".right-button", "button.next", ".nav-button.right"]:
                    try:
                        next_button = self.driver.find_element(By.CSS_SELECTOR, button_selector)
                        break
                    except Exception:
                        continue

                if next_button:
                    # Grab the image currently on display.
                    self._append_active_image(images, active_selectors)

                    # Read "current/total" pagination text to size the loop.
                    total_images = 10  # fallback when the count is not shown
                    for selector in ["div.swiper-pagination", ".pagination", ".slide-count"]:
                        try:
                            pagination_text = self.driver.find_element(By.CSS_SELECTOR, selector).text
                            if '/' in pagination_text:
                                total_images = int(pagination_text.split('/')[-1])
                                break
                        except Exception:
                            continue

                    # Step through the remaining slides (capped at 20 images).
                    for _ in range(min(total_images, 20) - 1):
                        try:
                            next_button.click()
                            time.sleep(0.8)  # let the next image load
                            self._append_active_image(images, active_selectors)
                        except Exception:
                            break

                    # Found images: stop trying other containers.
                    if images:
                        break
            except Exception:
                continue

    def _append_active_image(self, images, selectors):
        """Append the currently displayed carousel image URL to *images*, if new."""
        for selector in selectors:
            try:
                img_url = self.driver.find_element(By.CSS_SELECTOR, selector).get_attribute("src")
                if img_url and img_url not in images:
                    images.append(img_url)
                break
            except Exception:
                continue

    def _extract_comments(self):
        """Collect up to 10 comments from the currently open post page."""
        comments = []
        try:
            comment_elements = self.driver.find_elements(By.CSS_SELECTOR, "div.comment-item")
            for comment in comment_elements[:10]:  # only the first 10 comments
                comment_author = comment.find_element(By.CSS_SELECTOR, "div.comment-author").text.strip()
                comment_content = comment.find_element(By.CSS_SELECTOR, "div.comment-content").text.strip()
                comments.append({
                    "author": comment_author,
                    "content": comment_content
                })
        except NoSuchElementException:
            pass
        return comments

    def login(self):
        """Log in to Xiaohongshu via QR-code scan.

        Returns:
            bool: True when login succeeded.
        """
        if not self.driver:
            self.start_browser()

        try:
            print("正在打开登录页面...")
            self.driver.get(self.login_url)
            wait = WebDriverWait(self.driver, 10)

            print("请使用小红书APP扫描二维码登录")
            # Make sure the QR-code pane is visible (may require a tab click).
            try:
                scan_login_tab = wait.until(EC.element_to_be_clickable((By.XPATH, "//div[contains(text(), '扫码登录') or contains(text(), '二维码')]")))
                scan_login_tab.click()
                print("已切换到扫码登录界面")
            except Exception:
                print("默认为扫码登录界面或未找到扫码登录选项")

            # Block until the user confirms scanning.
            input("扫码完成后请按回车键继续...")

            time.sleep(3)  # give the login a moment to settle

            try:
                # Elements such as the avatar only exist after a successful login.
                wait.until(EC.presence_of_element_located((By.XPATH, "//div[contains(@class, 'avatar') or contains(@class, 'user-avatar')]")))
                print("登录成功！")
                self.is_logged_in = True
                return True
            except Exception:
                print("登录失败或超时，将以非登录状态继续")
                return False

        except Exception as e:
            print(f"登录过程中出错: {str(e)}")
            return False

    def download_images(self, image_urls, output_dir="images"):
        """Download images and return the local file paths.

        Args:
            image_urls (list): Image URLs to download.
            output_dir (str): Directory to save images into.

        Returns:
            list: Paths of the images that were saved.
        """
        os.makedirs(output_dir, exist_ok=True)

        local_paths = []

        for i, url in enumerate(image_urls):
            try:
                # Build a timestamped file name.
                timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
                filename = f"img_{timestamp}_{i}.jpg"
                filepath = os.path.join(output_dir, filename)

                # Stream the download; the timeout keeps a dead server from
                # hanging the run, and `with` releases the connection.
                with requests.get(url, stream=True, timeout=30) as response:
                    if response.status_code == 200:
                        with open(filepath, 'wb') as f:
                            for chunk in response.iter_content(1024):
                                f.write(chunk)
                        local_paths.append(filepath)
                        print(f"图片已保存: {filepath}")
                    else:
                        print(f"下载图片失败，状态码: {response.status_code}")
            except Exception as e:
                print(f"下载图片出错: {str(e)}")

        return local_paths

    def process_posts_details_with_images(self, urls, output_file=None, output_format="excel"):
        """Fetch post details for each URL and optionally export them.

        Args:
            urls (list): Post URLs to process.
            output_file (str, optional): Output file name (extension replaced). Defaults to None.
            output_format (str, optional): "excel" or "html". Defaults to "excel".

        Returns:
            pd.DataFrame: One row per post, image URLs fanned out into columns.
        """
        details = []

        for i, url in enumerate(urls):
            print(f"正在处理第 {i+1}/{len(urls)} 个帖子: {url}")
            post = self.get_post_detail(url)

            if post:
                if 'images' in post and post['images']:
                    # Fan image URLs out into numbered columns for the DataFrame.
                    for j, img_url in enumerate(post['images']):
                        post[f'image_{j+1}_url'] = img_url
                    # Drop the original list to avoid duplication.
                    post.pop('images')

                details.append(post)

            time.sleep(random.uniform(1, 2))  # random pause between requests

        if not details:
            print("未获取到任何帖子详情")
            return pd.DataFrame()

        df = pd.DataFrame(details)

        if output_file:
            file_name, _ = os.path.splitext(output_file)
            if output_format.lower() == "excel":
                excel_file = f"{file_name}.xlsx"
                df.to_excel(excel_file, index=False)
                print(f"详情数据已保存到 {excel_file}")
            elif output_format.lower() == "html":
                html_file = f"{file_name}.html"
                with open(html_file, "w", encoding="utf-8") as f:
                    f.write(self._build_html_report(df))
                print(f"详情数据已保存到 {html_file} (HTML格式)")

        return df

    def _build_html_report(self, df):
        """Render *df* as a standalone card-style HTML document (returned as a string)."""
        # Fixed document header with the card-layout styles.
        html_content = """<!DOCTYPE html>
<html>
<head>
    <title>小红书数据</title>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <style>
        * {
            box-sizing: border-box;
            margin: 0;
            padding: 0;
            font-family: 'Segoe UI', Arial, sans-serif;
        }
        
        body {
            background-color: #f7f7f7;
            padding: 20px;
            color: #333;
        }
        
        h1 {
            text-align: center;
            margin: 20px 0;
            color: #ff2442;
        }
        
        .card-container {
            display: flex;
            flex-wrap: wrap;
            justify-content: center;
            gap: 20px;
            margin: 20px auto;
        }
        
        .card {
            background-color: white;
            border-radius: 15px;
            box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
            padding: 20px;
            width: 100%;
            max-width: 800px;
            margin-bottom: 20px;
            transition: transform 0.3s ease;
        }
        
        .card:hover {
            transform: translateY(-5px);
            box-shadow: 0 8px 16px rgba(0, 0, 0, 0.15);
        }
        
        .card-field {
            margin-bottom: 12px;
            line-height: 1.5;
        }
        
        .card-label {
            font-weight: bold;
            color: #ff2442;
            margin-right: 10px;
            display: block;
            margin-bottom: 4px;
        }
        
        .card-value {
            display: block;
            word-wrap: break-word;
        }
        
        .card-title {
            font-size: 1.3em;
            font-weight: bold;
            margin-bottom: 15px;
            padding-bottom: 10px;
            border-bottom: 1px solid #eee;
        }
        
        .image-container {
            display: flex;
            flex-wrap: wrap;
            gap: 10px;
            margin-top: 10px;
        }
        
        .image-container img {
            max-width: 230px;
            max-height: 230px;
            border-radius: 8px;
            object-fit: cover;
            box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
            transition: transform 0.2s ease;
        }
        
        .image-container img:hover {
            transform: scale(1.05);
            box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
        }
        
        a {
            color: #ff2442;
            text-decoration: none;
        }
        
        a:hover {
            text-decoration: underline;
        }
        
        .comments-section {
            margin-top: 10px;
        }
        
        .comments-list {
            margin-top: 8px;
            max-height: 200px;
            overflow-y: auto;
            border-left: 2px solid #ff2442;
            padding-left: 10px;
        }
        
        .comment-item {
            margin-bottom: 8px;
            padding-bottom: 8px;
            border-bottom: 1px dotted #eee;
        }
        
        .comment-author {
            font-weight: bold;
            color: #444;
        }
        
        .comment-content {
            color: #666;
        }
        
        .card-footer {
            margin-top: 15px;
            padding-top: 10px;
            border-top: 1px solid #eee;
            color: #999;
            font-size: 0.9em;
            text-align: right;
        }
    </style>
</head>
<body>
    <h1>小红书帖子详情</h1>
    
    <div class="card-container">
"""

        # Chinese labels for the known columns; unknown columns use their own name.
        field_names = {
            'title': '标题',
            'content': '内容',
            'author': '作者',
            'publish_time': '发布时间',
            'url': '链接',
            'comments': '评论',
            'crawl_time': '抓取时间',
        }

        # One card per record. All scraped values are passed through
        # html.escape() so attacker-controlled post text cannot inject
        # script into the generated report.
        for _, row in df.iterrows():
            html_content += '<div class="card">\n'

            for col in df.columns:
                if col.startswith('image_'):
                    continue  # image columns are rendered together below
                field_name = field_names.get(col, col)

                if col == 'url' and pd.notnull(row[col]):
                    html_content += '<div class="card-field">\n'
                    html_content += f'<div class="card-label">{field_name}</div>\n'
                    html_content += f'<div class="card-value"><a href="{html.escape(str(row[col]))}" target="_blank">点击查看原文</a></div>\n'
                    html_content += '</div>\n'
                elif col == 'comments':
                    # pd.notnull() on a list cell yields an ndarray whose truth
                    # value is ambiguous (it crashed with >1 comment), so test
                    # the value directly; empty/missing comments are skipped.
                    comments_data = row[col]
                    if isinstance(comments_data, (list, str)) and comments_data:
                        try:
                            if isinstance(comments_data, str):
                                comments_data = json.loads(comments_data.replace("'", '"'))

                            html_content += '<div class="card-field">\n'
                            html_content += f'<div class="card-label">{field_name}</div>\n'
                            html_content += '<div class="comments-section">\n'
                            html_content += '<div class="comments-list">\n'

                            if isinstance(comments_data, list) and comments_data:
                                for comment in comments_data:
                                    html_content += '<div class="comment-item">\n'
                                    html_content += f'<div class="comment-author">{html.escape(str(comment.get("author", "")))}</div>\n'
                                    html_content += f'<div class="comment-content">{html.escape(str(comment.get("content", "")))}</div>\n'
                                    html_content += '</div>\n'

                            html_content += '</div>\n'
                            html_content += '</div>\n'
                            html_content += '</div>\n'
                        except Exception:
                            # Parsing failed: fall back to plain escaped text.
                            html_content += '<div class="card-field">\n'
                            html_content += f'<div class="card-label">{field_name}</div>\n'
                            html_content += f'<div class="card-value">{html.escape(str(row[col]))}</div>\n'
                            html_content += '</div>\n'
                elif col == 'crawl_time':
                    continue  # rendered in the card footer instead
                else:
                    if pd.notnull(row[col]):
                        html_content += '<div class="card-field">\n'
                        html_content += f'<div class="card-label">{field_name}</div>\n'
                        html_content += f'<div class="card-value">{html.escape(str(row[col]))}</div>\n'
                        html_content += '</div>\n'

            # Image gallery for this card.
            image_cols = [c for c in df.columns if c.startswith('image_') and pd.notnull(row[c])]
            if image_cols:
                html_content += '<div class="card-field">\n'
                html_content += '<div class="card-label">图片</div>\n'
                html_content += '<div class="image-container">\n'
                for img_col in image_cols:
                    html_content += f'<img src="{html.escape(str(row[img_col]))}" alt="图片">\n'
                html_content += '</div>\n'
                html_content += '</div>\n'

            # Crawl time goes in the card footer.
            if 'crawl_time' in df.columns and pd.notnull(row['crawl_time']):
                html_content += '<div class="card-footer">\n'
                html_content += f'抓取时间: {html.escape(str(row["crawl_time"]))}\n'
                html_content += '</div>\n'

            html_content += '</div>\n'

        html_content += """
    </div>
</body>
</html>"""

        return html_content


def main():
    """Interactive entry point: prompt for keywords, scrape, and optionally fetch details."""
    keywords = input("请输入要搜索的关键词，多个关键词用逗号分隔: ").split(",")
    keywords = [k.strip() for k in keywords if k.strip()]

    # Fall back to 5 pages on empty or non-numeric input instead of crashing.
    try:
        max_pages = int(input("每个关键词抓取的最大页数 (默认5): ") or "5")
    except ValueError:
        max_pages = 5

    need_login = input("是否需要登录? (y/n，默认n): ").lower() == 'y'

    # Create the scraper instance.
    scraper = XiaohongshuScraper(headless=False)

    try:
        # Log in first when requested.
        if need_login:
            scraper.login()

        all_data = []

        # Search each keyword in turn.
        for keyword in keywords:
            scraper.current_keyword = keyword
            df = scraper.search_keyword(keyword, max_pages=max_pages)
            all_data.append(df)
            print(f"关键词 '{keyword}' 共抓取到 {len(df)} 条数据")

        if all_data:
            # Merge the per-keyword results.
            combined_df = pd.concat(all_data) if len(all_data) > 1 else all_data[0]
            combined_df.reset_index(drop=True, inplace=True)

            # Optionally fetch detail pages and images.
            need_details = input("是否需要获取详细信息和图片? (y/n，默认n): ").lower() == 'y'

            if need_details and 'url' in combined_df.columns and not combined_df.empty:
                output_format = input("选择输出格式 (excel/html，默认html): ").lower() or "html"
                urls = combined_df['url'].tolist()

                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                scraper.process_posts_details_with_images(
                    urls,
                    output_file=f"xiaohongshu_details_{timestamp}",
                    output_format=output_format
                )
                print("详细信息和图片获取完成！")

    except Exception as e:
        print(f"程序运行出错: {str(e)}")
        import traceback
        traceback.print_exc()
    finally:
        # Offer the close prompt on every path (success, error, or no data),
        # so the browser is never left open unintentionally.
        keep_browser = input("是否保持浏览器打开? (y/n，默认n): ").lower() == 'y'
        if not keep_browser:
            scraper.close_browser()
        else:
            print("浏览器将保持打开状态，请手动关闭窗口。")


# Run the interactive scraper only when executed as a script.
if __name__ == "__main__":
    main()