import asyncio
from pathlib import Path
import shutil
from crawlee.crawlers import PlaywrightCrawler, PlaywrightCrawlingContext
from up_info_get import up_tag_get

async def main():
    """Crawl a Xiaohongshu blogger profile page and persist the profile data.

    Launches a visible Chromium browser with a persistent user-data directory
    so that the scan-QR login only has to be performed once; subsequent runs
    reuse the saved session. Extracted fields are pushed to the crawler's
    default dataset.
    """
    # Crawler instance with the browser options configured inline.
    crawler = PlaywrightCrawler(
        # Browser engine to launch.
        browser_type='chromium',
        # Show the browser window (required so the user can scan the login QR code).
        headless=False,
        # Upper bound on requests handled in one run.
        max_requests_per_crawl=5,
        # Key setting: persistent user-data directory that keeps the login state.
        user_data_dir="./user_data/xiaohongshu"
    )

    @crawler.router.default_handler
    async def handle_request(context: PlaywrightCrawlingContext):
        """Extract blogger profile fields from the page and push them to the dataset."""
        context.log.info(f"正在处理: {context.request.url}")

        # Fixed grace period so the page's dynamic content can render.
        context.log.info("等待3秒")
        await context.page.wait_for_timeout(3000)
        context.log.info("等待3秒结束")

        # Login check: if the login popup is present, wait (up to 2 minutes)
        # for the user to complete the QR-code login manually.
        try:
            login_popup_count = await context.page.locator('.login-reason').count()
            print(login_popup_count)
            if login_popup_count > 0:
                context.log.info("检测到登录弹窗，需要登录")
                context.log.info("====================================")
                context.log.info("请在浏览器窗口中完成扫码登录操作...")
                context.log.info("登录后会自动继续")
                context.log.info("====================================")

                # The popup disappearing signals a successful login.
                await context.page.wait_for_selector('.login-reason',
                                                     state='hidden',
                                                     timeout=120000)
                context.log.info("✅ 登录成功!")

        except Exception as e:
            context.log.error(f"检查登录状态时出错: {str(e)}")

        async def _text(selector: str, default: str = "") -> str:
            """Return the inner text of *selector*, or *default* when missing.

            Mirrors the try/except style of the interaction-counter block below
            so one absent element no longer aborts the whole handler.
            """
            try:
                return await context.page.locator(selector).inner_text()
            except Exception as e:
                context.log.error(f"获取 {selector} 文本时出错: {str(e)}")
                return default

        # Blogger profile URL (the request we are currently handling).
        blogger_url = context.request.url
        context.log.info(f"博主url: {blogger_url}")

        # Blogger display name.
        blogger_name = await _text('div.user-name')
        context.log.info(f"博主名: {blogger_name}")

        # Blogger Xiaohongshu ID.
        blogger_redid = await _text('span.user-redId')
        context.log.info(f"博主小红书ID: {blogger_redid}")

        # Blogger IP location.
        blogger_ip = await _text('span.user-IP')
        context.log.info(f"博主IP所属地: {blogger_ip}")

        # Blogger profile description.
        blogger_desc = await _text('div.user-desc')
        context.log.info(f"博主主页页面介绍: {blogger_desc}")

        # Full page HTML for the tag parser.
        html_content = await context.page.content()

        # Blogger tags parsed from the page source, expected order:
        # gender, age, location, profession, college.
        # Pad to five entries so a partial result cannot raise IndexError
        # (the previous version crashed when up_tag_get returned fewer tags).
        up_tag_list = list(up_tag_get(html_content))
        up_tag_list = (up_tag_list + [""] * 5)[:5]
        (blogger_tag_gender, blogger_tag_age, blogger_tag_location,
         blogger_tag_profession, blogger_tag_college) = up_tag_list
        context.log.info(f"博主tag: {blogger_tag_gender}, {blogger_tag_age}, {blogger_tag_location}, {blogger_tag_profession}, {blogger_tag_college}")

        # Interaction counters, located by their label text; fall back to "0"
        # on any failure so the record is still written.
        try:
            follows_count = await context.page.locator('div.user-interactions div:has(span:text("关注")) span.count').inner_text()
            fans_count = await context.page.locator('div.user-interactions div:has(span:text("粉丝")) span.count').inner_text()
            interaction_count = await context.page.locator('div.user-interactions div:has(span:text("获赞与收藏")) span.count').inner_text()

            context.log.info(f"博主关注数: {follows_count}, 博主粉丝数: {fans_count}, 博主点赞与收藏数: {interaction_count}")

        except Exception as e:
            context.log.error(f"获取互动数据时出错: {str(e)}")
            follows_count = "0"
            fans_count = "0"
            interaction_count = "0"

        # Persist the record to the crawler's dataset.
        await context.push_data({
            'blogger_url': blogger_url,
            'blogger_name': blogger_name,
            'blogger_redid': blogger_redid,
            'blogger_ip': blogger_ip,
            'blogger_desc': blogger_desc,
            'blogger_tag_gender': blogger_tag_gender,
            'blogger_tag_age': blogger_tag_age,
            'blogger_tag_location': blogger_tag_location,
            'blogger_tag_profession': blogger_tag_profession,
            'blogger_tag_college': blogger_tag_college,
            'blogger_follows_num': follows_count,
            'blogger_fans_num': fans_count,
            'blogger_interaction_num': interaction_count
        })

    # Start the crawl from a single blogger profile URL.
    await crawler.run(["https://www.xiaohongshu.com/user/profile/57b7d72b5e87e73bdbfae283?xsec_token=ABELikS4aujWeajSuJXwzGUURm07Zr6JWWtebMLZhABYY=&xsec_source=pc_feed"])

if __name__ == "__main__":
    # Script entry point: drive the async crawler to completion.
    asyncio.run(main())