import asyncio
from pathlib import Path
import shutil
from crawlee.crawlers import PlaywrightCrawler, PlaywrightCrawlingContext
from note_info_get import get_element_1, get_last_update_time, get_three_data, get_ip_location, get_note_author_info
import random
from datetime import datetime, timedelta
import json

def process_like_count(like_text):
    """Normalize a raw like-count label scraped from the page.

    The UI renders the literal character '赞' when a comment has zero
    likes; map that to '0' and pass every other value through unchanged.

    Args:
        like_text: Raw text of the like counter element.

    Returns:
        '0' for the zero-likes placeholder, otherwise ``like_text`` as-is.
    """
    # NOTE: the original wrapped this in try/except, but a string equality
    # comparison cannot raise, so the handler was dead code and is removed.
    return '0' if like_text == '赞' else like_text

def process_time(time_text):
    """Convert a relative timestamp label into an absolute 'YYYY-MM-DD' date.

    Handles the relative formats the comment section uses: '刚刚' (just now),
    '今天 HH:MM' (today), '昨天 HH:MM' (yesterday), 'N天前' / 'N 天前'
    (N days ago) and bare 'MM-DD'. Any other value — or any parsing
    failure — is returned unchanged.

    Args:
        time_text: Raw text of the comment's date element.

    Returns:
        A 'YYYY-MM-DD' string when the format is recognized, otherwise
        the original ``time_text``.
    """
    import re

    try:
        now = datetime.now()

        # "刚刚" and "今天 HH:MM" both resolve to today's date.
        if time_text == "刚刚" or time_text.startswith("今天"):
            return now.strftime("%Y-%m-%d")

        # "昨天 HH:MM" -> yesterday's date.
        if time_text.startswith("昨天"):
            return (now - timedelta(days=1)).strftime("%Y-%m-%d")

        # "N天前" / "N 天前" -> N days ago. A regex is used because the
        # original split-on-whitespace approach failed on the common
        # no-space form "3天前".
        if "天前" in time_text:
            match = re.search(r"(\d+)\s*天前", time_text)
            if match:
                past_date = now - timedelta(days=int(match.group(1)))
                return past_date.strftime("%Y-%m-%d")
            return time_text

        # Bare "MM-DD" (dash must be in the middle) -> prefix current year.
        if len(time_text) == 5 and time_text[2] == "-":
            return f"{now.year}-{time_text}"

        return time_text
    except Exception as e:
        print(f"处理时间出错: {e}, 原始值: {time_text}")
        return time_text
    
async def main():
    """Configure the Playwright crawler, register the request handler and
    start crawling a single Xiaohongshu note page.
    """

    # Create the crawler instance with browser options configured inline.
    crawler = PlaywrightCrawler(
        # Browser engine to launch.
        browser_type='chromium',
        # Show the browser window (needed so the user can scan the login QR code).
        headless=False,
        # Cap on requests handled in this crawl.
        max_requests_per_crawl=5,
        # Key setting: persistent user-data dir so login state survives runs.
        user_data_dir="./user_data/xiaohongshu",
        # Generous timeout to leave room for manual login and lazy loading.
        request_handler_timeout=timedelta(seconds=300)
    )
    
    @crawler.router.default_handler
    async def handle_request(context: PlaywrightCrawlingContext):
        """Handle one note page: ensure login, scrape all comments, persist them."""
        context.log.info(f"正在处理: {context.request.url}")
        
        # Give the page a moment to settle before probing the DOM.
        context.log.info("等待3秒")
        await context.page.wait_for_timeout(3000)
        context.log.info("等待3秒结束")

        # Check login state.
        try:
            # A visible '.login-reason' element means the login popup is showing.
            login_popup_count = await context.page.locator('.login-reason').count()
            print(login_popup_count)
            if login_popup_count > 0:
                context.log.info("检测到登录弹窗，需要登录")
                context.log.info("====================================")
                context.log.info("请在浏览器窗口中完成扫码登录操作...")
                context.log.info("登录后会自动继续")
                context.log.info("====================================")
                
                # Wait for the popup to disappear — that signals a successful login.
                await context.page.wait_for_selector('.login-reason', 
                                                   state='hidden',
                                                   timeout=120000)
                context.log.info("✅ 登录成功!")
            
        except Exception as e:
            context.log.error(f"检查登录状态时出错: {str(e)}")
        
        # Wait for the comments container to be rendered.
        await context.page.wait_for_selector("div.comments-container")
        
        # Collect comment data (scroll to lazy-load, expand replies, extract).
        comments_data = await extract_all_comments(context)
        
        # Print comment info (debugging aid).
        for comment in comments_data:
            context.log.info(f"\n父评论 ID: {comment['comment_id']}")
            context.log.info(f"作者: {comment['author']}")
            context.log.info(f"用户ID: {comment['user_id']}")
            context.log.info(f"内容: {comment['content']}")
            context.log.info(f"点赞: {comment['likes']}")
            context.log.info(f"时间: {comment['time']}")
            context.log.info(f"地点: {comment['location']}")
            
            if comment['replies']:
                context.log.info("\n回复:")
                for reply in comment['replies']:
                    context.log.info(f"  |- 评论 ID: {reply['comment_id']}")
                    context.log.info(f"  |- 作者: {reply['author']}")
                    context.log.info(f"  |- 用户ID: {reply['user_id']}")
                    context.log.info(f"  |- 回复给: {reply['reply_to']}")
                    context.log.info(f"  |- 内容: {reply['content']}")
                    context.log.info(f"  |- 点赞: {reply['likes']}")
                    context.log.info(f"  |- 时间: {reply['time']}")
                    context.log.info(f"  |- 地点: {reply['location']}")
                    context.log.info("  |")
        
        # Save the raw data to a local JSON file as well.
        with open('comments.json', 'w', encoding='utf-8') as f:
            json.dump(comments_data, f, ensure_ascii=False, indent=2)
        
        await context.push_data({
            "comments": comments_data
        })

    # Start the crawl from a single note URL (xsec_token is request-scoped).
    await crawler.run(["https://www.xiaohongshu.com/explore/6816f9240000000021003137?xsec_token=AB0PyOjgV-U_rc_tcFTxGHYQ-GF1qfZKr3f-oeysA98R8=&xsec_source=pc_feed"])

async def scroll_and_load_comments(context):
    """Lazy-load every parent comment by repeatedly scrolling the page.

    Scrolls with randomized wheel deltas and delays, and stops once the
    parent-comment count has stayed unchanged for five consecutive passes.
    """
    # Click inside the pane first so subsequent wheel events target it.
    await context.page.locator('div.total').click()

    parent_selector = 'div.comments-container div.parent-comment'

    # Track the comment count across passes plus a stall counter.
    prev_count = await context.page.locator(parent_selector).count()
    stall_streak = 0  # consecutive passes with no new comments
    pass_idx = 0

    while True:
        # Simulate a wheel scroll with a randomized distance.
        await context.page.mouse.wheel(0, random.randint(800, 1500))

        # Randomized pause to let new content load.
        await context.page.wait_for_timeout(random.randint(1500, 2500))

        curr_count = await context.page.locator(parent_selector).count()
        context.log.info(f"第{pass_idx}次循环：初始长度为:{prev_count}, 结束长度为:{curr_count}")

        if curr_count == prev_count:
            stall_streak += 1
            # Five stalled passes in a row means the list is exhausted.
            if stall_streak >= 5:
                context.log.info("连续5次未加载新评论，退出循环")
                return
        else:
            stall_streak = 0  # new comments arrived — reset the streak

        prev_count = curr_count
        pass_idx += 1

async def expand_all_replies(context):
    """Expand every collapsed reply thread by clicking all "show more" buttons.

    Repeats until no "show more" buttons remain. FIX: the original looped
    forever when buttons stayed in the DOM but never became visible (none
    were ever clicked, so the exit condition could never be reached); we now
    also exit when a full pass clicks nothing.
    """
    while True:
        # Find all currently-present "show more" buttons.
        show_more_buttons = await context.page.query_selector_all("div.reply-container > div.show-more")
        if not show_more_buttons:
            break

        clicked_this_pass = 0
        for button in show_more_buttons:
            try:
                # Only click buttons that are actually visible.
                if await button.is_visible():
                    await button.click()
                    clicked_this_pass += 1
                    # Wait a beat so the newly revealed replies can load.
                    await context.page.wait_for_timeout(random.choice([1000, 2000]))
            except Exception as e:
                context.log.error(f"点击显示更多按钮时出错: {e}")

        if clicked_this_pass == 0:
            # Buttons exist but none are clickable — bail out instead of spinning.
            break

        # Short pause to avoid clicking in rapid-fire succession.
        await context.page.wait_for_timeout(500)

async def extract_comment_info(context):
    """Extract all parent comments and their replies from the loaded page.

    Runs an in-page JS snippet that walks 'div.parent-comment' nodes and
    returns a list of dicts (comment_id, author, user_id, content, likes,
    time, location, replies), then normalizes like counts and timestamps
    on the Python side. Returns the (possibly partially normalized) list
    even if post-processing fails for some entries.
    """
    # Collect all comment data via in-page evaluation.
    # NOTE(review): the JS assumes '.comment-item', '.author .name',
    # '.note-text', '.like .count' and '.date span' always exist under each
    # parent comment — a layout change would throw inside evaluate().
    comments_data = await context.page.evaluate("""
        () => {
            const comments = [];
            // 获取所有父评论
            document.querySelectorAll('div.parent-comment').forEach(parentDiv => {
                // 获取父评论信息
                const parentComment = parentDiv.querySelector('div.comment-item');
                const parentAuthorLink = parentComment.querySelector('.author .name');
                const parentData = {
                    comment_id: parentComment.id.replace('comment-', ''),
                    author: parentAuthorLink.innerText,
                    user_id: parentAuthorLink.getAttribute('data-user-id') || '',
                    content: parentComment.querySelector('.note-text').innerText,
                    likes: parentComment.querySelector('.like .count').innerText,
                    time: parentComment.querySelector('.date span').innerText,
                    location: parentComment.querySelector('.location')?.innerText || '',
                    replies: []
                };
                
                // 获取子评论信息
                const replyContainer = parentDiv.querySelector('.reply-container');
                if (replyContainer) {
                    replyContainer.querySelectorAll('.comment-item-sub').forEach(replyDiv => {
                        const replyAuthorLink = replyDiv.querySelector('.author .name');
                        const replyData = {
                            comment_id: replyDiv.id.replace('comment-', ''),
                            author: replyAuthorLink.innerText,
                            user_id: replyAuthorLink.getAttribute('data-user-id') || '',
                            content: replyDiv.querySelector('.note-text').innerText,
                            likes: replyDiv.querySelector('.like .count').innerText,
                            time: replyDiv.querySelector('.date span').innerText,
                            location: replyDiv.querySelector('.location')?.innerText || '',
                            reply_to: replyDiv.querySelector('.nickname')?.innerText || ''
                        };
                        parentData.replies.push(replyData);
                    });
                }
                
                comments.push(parentData);
            });
            return comments;
        }
    """)
    
    # Normalize the scraped fields; errors are logged per-item so one bad
    # comment does not abort the whole extraction.
    try:
        for comment in comments_data:
            try:
                # Normalize the parent comment's like count and timestamp.
                comment['likes'] = process_like_count(comment['likes'])
                comment['time'] = process_time(comment['time'])
            except Exception as e:
                context.log.error(f"处理父评论数据出错: {e}")
            
            # Normalize each reply the same way.
            try:
                for reply in comment['replies']:
                    reply['likes'] = process_like_count(reply['likes'])
                    reply['time'] = process_time(reply['time'])
            except Exception as e:
                context.log.error(f"处理回复数据出错: {e}")
    except Exception as e:
        context.log.error(f"处理评论数据出错: {e}")
    
    return comments_data

async def extract_all_comments(context):
    """Run the full comment-scraping pipeline for the current page.

    Pipeline: (1) scroll to lazy-load every parent comment, (2) expand all
    collapsed reply threads, (3) extract the structured comment data.
    Returns an empty list if any stage raises.
    """
    try:
        # Stage 1: trigger lazy-loading until the comment list stops growing.
        await scroll_and_load_comments(context)

        # Stage 2: click through every "show more" reply toggle.
        await expand_all_replies(context)

        # Stage 3: pull the structured comment data out of the DOM.
        extracted = await extract_comment_info(context)
        return extracted

    except Exception as e:
        context.log.error(f"爬取评论过程中出错: {e}")
        import traceback
        context.log.error(traceback.format_exc())
        return []

# Script entry point: start the asyncio event loop and run the crawler.
if __name__ == "__main__":
    asyncio.run(main())