#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import time
import json
import random
import logging
import argparse
import pandas as pd
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException, ElementClickInterceptedException
from webdriver_manager.chrome import ChromeDriverManager
from tqdm import tqdm

# Logging configuration: INFO level, mirrored to both a UTF-8 log file
# (so Chinese messages survive round-tripping) and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("xiaohongshu_scraper.log", encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


class AdvancedXiaohongshuScraper:
    """Advanced Xiaohongshu scraper with login support and detail extraction.

    Drives an (optionally headless) Chrome instance via Selenium with basic
    anti-automation-detection tweaks, supports cookie-based session reuse,
    password/QR-code login, keyword search scraping with pagination, and
    per-post detail extraction (content, images, tags, top comments).
    """

    def __init__(self, headless=False, wait_time=10, data_dir="data"):
        """Initialize the scraper.

        Args:
            headless (bool, optional): Run Chrome in headless mode. Defaults to False.
            wait_time (int, optional): Max seconds to wait for page elements. Defaults to 10.
            data_dir (str, optional): Directory where scraped data is saved. Defaults to "data".
        """
        self.base_url = "https://www.xiaohongshu.com"
        self.search_url = f"{self.base_url}/search"
        self.login_url = f"{self.base_url}/login"
        self.wait_time = wait_time
        self.data_dir = data_dir

        # Ensure the data directory exists (idempotent, race-free).
        os.makedirs(data_dir, exist_ok=True)

        # Chrome options: the flags below reduce common automation
        # fingerprints so the site is less likely to flag the browser as a bot.
        self.options = Options()

        if headless:
            self.options.add_argument("--headless")

        self.options.add_argument("--disable-gpu")
        self.options.add_argument("--window-size=1920,1080")
        self.options.add_argument("--no-sandbox")
        self.options.add_argument("--disable-dev-shm-usage")
        self.options.add_argument("--disable-blink-features=AutomationControlled")
        self.options.add_experimental_option("excludeSwitches", ["enable-automation"])
        self.options.add_experimental_option("useAutomationExtension", False)

        # Pool of realistic user agents; one is picked at random per session.
        self.user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.1 Safari/605.1.15'
        ]

        self.driver = None          # Selenium WebDriver; created lazily by start_browser()
        self.is_logged_in = False   # set True once a login is confirmed
        self.data = []              # accumulated post dicts for the current keyword
        self.current_keyword = ""   # keyword being scraped; tagged onto every row

    def start_browser(self):
        """Start Chrome with a random user agent and anti-detection tweaks."""
        service = Service(ChromeDriverManager().install())
        self.driver = webdriver.Chrome(service=service, options=self.options)

        self.driver.set_window_size(1920, 1080)

        # Hide navigator.webdriver and override the UA via CDP so scripted
        # checks on the page see a regular browser.
        user_agent = random.choice(self.user_agents)
        self.driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")
        self.driver.execute_cdp_cmd('Network.setUserAgentOverride', {"userAgent": user_agent})

        logger.info(f"浏览器已启动，使用用户代理: {user_agent}")

    def close_browser(self):
        """Quit the browser and release the driver reference (safe to call twice)."""
        if self.driver:
            self.driver.quit()
            self.driver = None
            logger.info("浏览器已关闭")

    def load_cookies(self, cookie_file="cookies.json"):
        """Load cookies from a JSON file into the current browser session.

        Args:
            cookie_file (str, optional): Cookie file path. Defaults to "cookies.json".

        Returns:
            bool: True if the cookies were loaded successfully.
        """
        if not os.path.exists(cookie_file):
            logger.warning(f"Cookie文件 {cookie_file} 不存在")
            return False

        try:
            # Explicit UTF-8 so behaviour does not depend on the platform's
            # default encoding (e.g. cp936 on Chinese Windows).
            with open(cookie_file, 'r', encoding='utf-8') as f:
                cookies = json.load(f)

            for cookie in cookies:
                # Selenium rejects cookies with a stale/float 'expiry'; drop
                # the field so restored sessions are always accepted.
                cookie.pop('expiry', None)
                self.driver.add_cookie(cookie)

            logger.info(f"已从 {cookie_file} 加载 {len(cookies)} 个cookies")
            return True

        except Exception as e:
            logger.error(f"加载cookies出错: {str(e)}")
            return False

    def save_cookies(self, cookie_file="cookies.json"):
        """Save the current browser session's cookies to a JSON file.

        Args:
            cookie_file (str, optional): Cookie file path. Defaults to "cookies.json".
        """
        try:
            cookies = self.driver.get_cookies()
            with open(cookie_file, 'w', encoding='utf-8') as f:
                json.dump(cookies, f)

            logger.info(f"已将 {len(cookies)} 个cookies保存到 {cookie_file}")

        except Exception as e:
            logger.error(f"保存cookies出错: {str(e)}")

    def login(self, username=None, password=None, cookie_file="cookies.json", wait_scan=True):
        """Log in to Xiaohongshu.

        Tries cookie-based login first; falls back to password login (when
        credentials are supplied) or interactive QR-code scanning otherwise.

        Args:
            username (str, optional): Username / phone number. Defaults to None.
            password (str, optional): Password. Defaults to None.
            cookie_file (str, optional): Cookie file path. Defaults to "cookies.json".
            wait_scan (bool, optional): Block on the QR-scan prompt. Defaults to True.

        Returns:
            bool: True if login succeeded.
        """
        if not self.driver:
            self.start_browser()

        # Cookies can only be added for the current domain, so load the site
        # first, then inject cookies and refresh.
        self.driver.get(self.base_url)

        if os.path.exists(cookie_file):
            logger.info("尝试使用cookies登录")
            self.load_cookies(cookie_file)
            self.driver.refresh()
            time.sleep(3)

            if self._check_login_status():
                logger.info("使用cookies登录成功")
                self.is_logged_in = True
                return True

        # Cookie login failed; fall back to password or QR-code login.
        try:
            logger.info("打开登录页面")
            self.driver.get(self.login_url)

            wait = WebDriverWait(self.driver, self.wait_time)

            if username and password:
                logger.info("尝试使用用户名密码登录")

                # Switch to the password-login tab (selectors may need
                # adjusting if the login page layout changes).
                try:
                    password_login_tab = wait.until(EC.element_to_be_clickable((By.XPATH, "//div[contains(text(), '密码登录')]")))
                    password_login_tab.click()
                except TimeoutException:
                    logger.warning("未找到密码登录选项，将使用当前登录方式")

                # Fill in the username field.
                username_input = wait.until(EC.presence_of_element_located((By.XPATH, "//input[@name='xhsId' or @placeholder='请输入手机号/用户名']")))
                username_input.clear()
                username_input.send_keys(username)

                # Fill in the password field.
                password_input = wait.until(EC.presence_of_element_located((By.XPATH, "//input[@name='password' or @placeholder='请输入密码']")))
                password_input.clear()
                password_input.send_keys(password)

                # Submit.
                login_button = wait.until(EC.element_to_be_clickable((By.XPATH, "//button[contains(text(), '登录')]")))
                login_button.click()
            else:
                # No credentials: use interactive QR-code login.
                logger.info("请使用小红书APP扫码登录")

                # Make sure the QR-code pane is showing (may require a click).
                try:
                    scan_login_tab = wait.until(EC.element_to_be_clickable((By.XPATH, "//div[contains(text(), '扫码登录') or contains(text(), '二维码')]")))
                    scan_login_tab.click()
                    logger.info("已切换到扫码登录界面")
                except TimeoutException:
                    logger.info("默认为扫码登录界面或未找到扫码登录选项")

            # Give the page time to surface a captcha challenge, if any.
            time.sleep(3)

            # Captcha must be solved manually by the operator.
            if "验证码" in self.driver.page_source:
                logger.warning("需要处理验证码，请手动完成验证")
                input("请在浏览器中完成验证码，然后按回车继续...")

            # In QR mode, block until the operator confirms the scan.
            if wait_scan and (not username or not password):
                logger.info("请使用小红书APP扫描二维码登录")
                input("扫码完成后请按回车键继续...")

            logger.info("等待登录完成...")
            time.sleep(5)

            if self._check_login_status():
                logger.info("登录成功")
                self.is_logged_in = True
                # Persist the session so future runs can skip login.
                self.save_cookies(cookie_file)
                return True
            else:
                logger.error("登录失败，请检查用户名密码或手动登录")
                return False

        except Exception as e:
            logger.error(f"登录过程中出错: {str(e)}")
            return False

    def _check_login_status(self):
        """Check whether the session is logged in.

        Uses the presence of a user-avatar element as the logged-in signal.

        Returns:
            bool: True if logged in.
        """
        try:
            self.driver.find_element(By.XPATH, "//div[contains(@class, 'avatar') or contains(@class, 'user-avatar')]")
            return True
        except NoSuchElementException:
            return False

    def search_keyword(self, keyword, max_pages=5, get_details=False, save_interval=2):
        """Search a keyword and scrape the result pages.

        Args:
            keyword (str): Keyword to search for.
            max_pages (int, optional): Maximum number of pages. Defaults to 5.
            get_details (bool, optional): Also fetch each post's detail page. Defaults to False.
            save_interval (int, optional): Save a checkpoint every N pages. Defaults to 2.

        Returns:
            pd.DataFrame: The scraped data.
        """
        if not self.driver:
            self.start_browser()

        self.current_keyword = keyword
        self.data = []  # reset accumulator for this keyword

        try:
            self.driver.get(self.search_url)
            logger.info(f"正在搜索关键词: {keyword}")

            # Type the keyword into the search box and submit.
            wait = WebDriverWait(self.driver, self.wait_time)
            search_input = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "input[type='search']")))
            search_input.clear()
            search_input.send_keys(keyword)
            search_input.send_keys(Keys.ENTER)

            # Let the result page render.
            time.sleep(3)

            # Filter results to notes ("笔记") when the tab exists.
            try:
                notes_tab = wait.until(EC.element_to_be_clickable((By.XPATH, "//div[contains(text(), '笔记')]")))
                notes_tab.click()
                time.sleep(2)
            except TimeoutException:
                logger.warning("未找到'笔记'标签，继续使用默认搜索结果")

            # Paginate through the results.
            for page in range(max_pages):
                logger.info(f"正在抓取第 {page+1}/{max_pages} 页数据...")

                posts = self._extract_posts_from_current_page()

                # Optionally visit each post for full details.
                if get_details and posts:
                    for i, post in enumerate(posts):
                        if 'url' in post and post['url']:
                            logger.info(f"正在获取第 {i+1}/{len(posts)} 个帖子详情: {post['title'][:20]}...")
                            try:
                                detail = self.get_post_detail(post['url'])
                                if detail:
                                    # Merge detail fields without clobbering
                                    # values already scraped from the card.
                                    post.update({k: v for k, v in detail.items() if k not in post})
                                    time.sleep(random.uniform(1, 2))  # jitter between detail fetches
                            except Exception as e:
                                logger.error(f"获取帖子详情失败: {str(e)}")

                # Periodic checkpoint so a crash doesn't lose everything.
                if (page + 1) % save_interval == 0:
                    self._save_data(f"xiaohongshu_{keyword}_page{page+1}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv")

                # Advance to the next page, or stop at the last one.
                try:
                    next_button = wait.until(EC.element_to_be_clickable((By.XPATH, "//div[contains(@class, 'pagination')]/button[text()='下一页']")))
                    if not next_button.is_enabled():
                        logger.info("已到达最后一页")
                        break
                    next_button.click()
                    time.sleep(random.uniform(2, 4))  # random delay to avoid bot detection
                except (NoSuchElementException, ElementClickInterceptedException, TimeoutException) as e:
                    logger.warning(f"点击下一页按钮失败: {str(e)}")
                    break

            # Final save for this keyword.
            return self._save_data(f"xiaohongshu_{keyword}_final_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv")

        except Exception as e:
            logger.error(f"搜索过程中出错: {str(e)}")
            # Best effort: persist whatever was scraped before the failure.
            return self._save_data(f"xiaohongshu_{keyword}_error_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv")

    def _save_data(self, filename):
        """Save accumulated data to a CSV file under self.data_dir.

        Args:
            filename (str): Output file name.

        Returns:
            pd.DataFrame: The saved data (empty frame when nothing to save).
        """
        if not self.data:
            logger.warning("没有数据可保存")
            return pd.DataFrame()

        df = pd.DataFrame(self.data)
        output_file = os.path.join(self.data_dir, filename)

        try:
            # utf-8-sig BOM keeps Chinese text readable when opened in Excel.
            df.to_csv(output_file, index=False, encoding='utf-8-sig')
            logger.info(f"数据已保存到 {output_file}，共 {len(df)} 条记录")
        except Exception as e:
            logger.error(f"保存数据失败: {str(e)}")

        return df

    def _extract_posts_from_current_page(self):
        """Extract post data from the current search-result page.

        Appends each post dict to self.data as a side effect.

        Returns:
            list: Post dicts found on the current page (empty on timeout).
        """
        wait = WebDriverWait(self.driver, self.wait_time)
        page_posts = []

        try:
            # Wait for the post cards to be present (selectors cover two
            # known page layouts).
            post_cards = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "div.cover-item, div.note-item")))

            logger.info(f"找到 {len(post_cards)} 个帖子")

            for card in post_cards:
                try:
                    # Title and author are treated as required; missing ones
                    # raise and skip the whole card below.
                    title_element = card.find_element(By.CSS_SELECTOR, "div.title, div.note-title")
                    title = title_element.text.strip() if title_element else "无标题"

                    user_info_element = card.find_element(By.CSS_SELECTOR, "div.user-info, div.author-wrapper")
                    author = user_info_element.text.strip() if user_info_element else "未知作者"

                    # Like count (optional).
                    try:
                        likes_element = card.find_element(By.CSS_SELECTOR, "div.like-wrapper span, div.like-num")
                        likes = likes_element.text.strip() if likes_element else "0"
                    except NoSuchElementException:
                        likes = "0"

                    # Collect (bookmark) count (optional).
                    try:
                        collect_element = card.find_element(By.CSS_SELECTOR, "div.collect-wrapper span, div.collect-num")
                        collects = collect_element.text.strip() if collect_element else "0"
                    except NoSuchElementException:
                        collects = "0"

                    # Comment count (optional).
                    try:
                        comment_element = card.find_element(By.CSS_SELECTOR, "div.comment-wrapper span, div.comment-num")
                        comments_count = comment_element.text.strip() if comment_element else "0"
                    except NoSuchElementException:
                        comments_count = "0"

                    # Post URL (optional).
                    try:
                        url_element = card.find_element(By.CSS_SELECTOR, "a")
                        url = url_element.get_attribute("href") if url_element else ""
                    except NoSuchElementException:
                        url = ""

                    # Post description (optional).
                    try:
                        desc_element = card.find_element(By.CSS_SELECTOR, "div.desc, div.note-desc")
                        description = desc_element.text.strip() if desc_element else ""
                    except NoSuchElementException:
                        description = ""

                    # Cover image (optional).
                    try:
                        img_element = card.find_element(By.CSS_SELECTOR, "img.cover, img.note-cover")
                        image_url = img_element.get_attribute("src") if img_element else ""
                    except NoSuchElementException:
                        image_url = ""

                    post_data = {
                        "title": title,
                        "author": author,
                        "likes": likes,
                        "collects": collects,
                        "comments_count": comments_count,
                        "description": description,
                        "image_url": image_url,
                        "url": url,
                        "source_keyword": self.current_keyword,
                        "crawl_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    }

                    page_posts.append(post_data)
                    self.data.append(post_data)

                except Exception as e:
                    logger.error(f"提取帖子信息出错: {str(e)}")
                    continue

            return page_posts

        except TimeoutException:
            logger.error("等待帖子加载超时")
            return []

    def get_post_detail(self, url):
        """Fetch a post's detail page in a new tab and extract its data.

        Args:
            url (str): Post URL.

        Returns:
            dict: Post details, or None on failure.
        """
        if not self.driver:
            self.start_browser()

        # Remember the originating tab so we can return to it.
        current_window = self.driver.current_window_handle

        try:
            # Open the post in a new tab. Pass the URL as a script argument
            # instead of interpolating it into the JS source: URLs containing
            # quotes would otherwise break (or inject into) the script.
            self.driver.execute_script("window.open(arguments[0]);", url)

            # Switch to the newly opened tab.
            time.sleep(2)
            new_window = [window for window in self.driver.window_handles if window != current_window][0]
            self.driver.switch_to.window(new_window)

            wait = WebDriverWait(self.driver, self.wait_time)

            # Wait for the post body to load.
            content_element = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "div.content, div.note-content")))

            # Title (optional).
            try:
                title_element = self.driver.find_element(By.CSS_SELECTOR, "h1.title, div.note-title")
                title = title_element.text.strip() if title_element else "无标题"
            except NoSuchElementException:
                title = "无标题"

            content = content_element.text.strip()

            # Author (optional).
            try:
                author_element = self.driver.find_element(By.CSS_SELECTOR, "div.author-name, div.user-nickname")
                author = author_element.text.strip() if author_element else "未知作者"
            except NoSuchElementException:
                author = "未知作者"

            # Publish time (optional).
            try:
                publish_time_element = self.driver.find_element(By.CSS_SELECTOR, "div.publish-time, div.note-time")
                publish_time = publish_time_element.text.strip() if publish_time_element else ""
            except NoSuchElementException:
                publish_time = ""

            # Engagement counters: likes, collects, comments (all optional).
            try:
                like_element = self.driver.find_element(By.CSS_SELECTOR, "div.like-count, span.like-count")
                likes = like_element.text.strip() if like_element else "0"
            except NoSuchElementException:
                likes = "0"

            try:
                collect_element = self.driver.find_element(By.CSS_SELECTOR, "div.collect-count, span.collect-count")
                collects = collect_element.text.strip() if collect_element else "0"
            except NoSuchElementException:
                collects = "0"

            try:
                comment_count_element = self.driver.find_element(By.CSS_SELECTOR, "div.comment-count, span.comment-count")
                comments_count = comment_count_element.text.strip() if comment_count_element else "0"
            except NoSuchElementException:
                comments_count = "0"

            # Images (best effort).
            images = []
            try:
                img_elements = self.driver.find_elements(By.CSS_SELECTOR, "img.note-image, div.image-container img")
                for img in img_elements:
                    img_url = img.get_attribute("src")
                    if img_url:
                        images.append(img_url)
            except Exception:
                pass

            # Comments (best effort, first 10 only).
            comments = []
            try:
                # Scroll to the bottom so the comment section lazy-loads.
                self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                time.sleep(2)

                comment_elements = self.driver.find_elements(By.CSS_SELECTOR, "div.comment-item, div.comment-card")
                for comment in comment_elements[:10]:
                    try:
                        comment_author_element = comment.find_element(By.CSS_SELECTOR, "div.comment-author, div.user-nickname")
                        comment_author = comment_author_element.text.strip() if comment_author_element else "未知用户"

                        comment_content_element = comment.find_element(By.CSS_SELECTOR, "div.comment-content, div.content")
                        comment_content = comment_content_element.text.strip() if comment_content_element else ""

                        comment_time_element = comment.find_element(By.CSS_SELECTOR, "div.comment-time, div.time")
                        comment_time = comment_time_element.text.strip() if comment_time_element else ""

                        comments.append({
                            "author": comment_author,
                            "content": comment_content,
                            "time": comment_time
                        })
                    except Exception as e:
                        logger.error(f"提取评论出错: {str(e)}")
                        continue
            except Exception as e:
                logger.error(f"提取评论区出错: {str(e)}")

            # Hashtags (best effort), with the leading '#' stripped.
            tags = []
            try:
                tag_elements = self.driver.find_elements(By.CSS_SELECTOR, "span.tag, div.tag")
                for tag in tag_elements:
                    tag_text = tag.text.strip()
                    if tag_text and tag_text != "#":
                        tags.append(tag_text.replace("#", ""))
            except Exception:
                pass

            post_detail = {
                "title": title,
                "content": content,
                "author": author,
                "publish_time": publish_time,
                "likes_count": likes,
                "collects_count": collects,
                "comments_count": comments_count,
                "url": url,
                "images": images,
                "tags": tags,
                "comments": comments,
                "crawl_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            }

            return post_detail

        except Exception as e:
            logger.error(f"获取帖子详情出错: {str(e)}")
            return None
        finally:
            # Close only tabs we opened, then return to the original tab.
            # The original code unconditionally called driver.close(), which
            # closed the MAIN window when the new tab failed to open, killing
            # the whole scraping session.
            for handle in self.driver.window_handles:
                if handle != current_window:
                    self.driver.switch_to.window(handle)
                    self.driver.close()
            self.driver.switch_to.window(current_window)


def main():
    """Command-line entry point: parse arguments, log in, scrape each keyword."""
    parser = argparse.ArgumentParser(description="小红书高级爬虫工具")
    parser.add_argument("-k", "--keywords", type=str, help="要搜索的关键词，多个关键词用逗号分隔")
    parser.add_argument("-p", "--pages", type=int, default=5, help="每个关键词抓取的最大页数 (默认5)")
    parser.add_argument("-d", "--details", action="store_true", help="是否获取帖子详情")
    parser.add_argument("-u", "--username", type=str, help="小红书用户名/手机号")
    parser.add_argument("-w", "--password", type=str, help="小红书密码")
    parser.add_argument("--headless", action="store_true", help="是否使用无头模式")
    parser.add_argument("--data-dir", type=str, default="data", help="数据保存目录 (默认'data')")
    parser.add_argument("--no-wait", action="store_true", help="不等待扫码登录，直接继续")

    opts = parser.parse_args()

    # Fall back to an interactive prompt when no keywords were given.
    if not opts.keywords:
        opts.keywords = input("请输入要搜索的关键词，多个关键词用逗号分隔: ")

    keyword_list = [item.strip() for item in opts.keywords.split(",") if item.strip()]

    if not keyword_list:
        logger.error("未提供有效的关键词")
        return

    bot = AdvancedXiaohongshuScraper(headless=opts.headless, data_dir=opts.data_dir)
    wait_for_scan = not opts.no_wait

    try:
        # Log in with credentials when supplied, otherwise via cookies/QR code.
        if opts.username and opts.password:
            logged_in = bot.login(opts.username, opts.password, wait_scan=wait_for_scan)
            if not logged_in:
                logger.warning("登录失败，将以非登录状态继续")
        else:
            logger.info("未提供登录凭据，将使用扫码登录")
            bot.login(wait_scan=wait_for_scan)

        # Scrape every requested keyword in turn.
        for kw in keyword_list:
            logger.info(f"开始抓取关键词: {kw}")
            result_df = bot.search_keyword(kw, max_pages=opts.pages, get_details=opts.details)
            logger.info(f"关键词 '{kw}' 共抓取到 {len(result_df)} 条数据")

    except KeyboardInterrupt:
        # Ctrl-C: persist whatever has been collected so far before exiting.
        logger.info("用户中断，正在保存已抓取的数据...")
        bot._save_data(f"xiaohongshu_interrupted_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv")
    finally:
        bot.close_browser()


if __name__ == "__main__":
    main()