# 声明：本代码仅供学习和研究目的使用。使用者应遵守以下原则：
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率，避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。


import asyncio
import os
import random
import time
from asyncio import Task
from typing import Dict, List, Optional, Tuple

from playwright.async_api import BrowserContext, BrowserType, Page, async_playwright
from tenacity import RetryError

import config
from base.base_crawler import AbstractCrawler
from config import CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES
from model.m_xiaohongshu import NoteUrlInfo
from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
from store import xhs as xhs_store
from tools import utils
from var import crawler_type_var, source_keyword_var

from .client import XiaoHongShuClient
from .exception import DataFetchError
from .field import SearchSortType
from .help import parse_note_info_from_note_url, get_search_id
from .login import XiaoHongShuLogin


class XiaoHongShuCrawler(AbstractCrawler):
    """Crawler for xiaohongshu.com.

    Drives a Playwright browser for navigation/login and an HTTP API client
    (XiaoHongShuClient) for data fetching. Supports three modes selected by
    config.CRAWLER_TYPE: keyword search, specified-note detail, and creator.
    """

    # Active Playwright page used for navigation and login.
    context_page: Page
    # API client built from the browser context's cookies.
    xhs_client: XiaoHongShuClient
    # Persistent or throwaway Playwright browser context.
    browser_context: BrowserContext

    def __init__(self, **kwargs) -> None:
        """Initialize crawl limits and keywords, falling back to config.

        Keyword Args:
            max_notes_count: cap on total notes to process.
            max_comments_count: per-note comment cap.
            keywords: comma-separated search keywords.
        """
        self.index_url = "https://www.xiaohongshu.com"
        # self.user_agent = utils.get_user_agent()
        self.user_agent = config.UA if config.UA else "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"

        # Read limits from kwargs, falling back to the config defaults.
        self.max_notes_count = kwargs.get('max_notes_count', config.CRAWLER_MAX_NOTES_COUNT)
        self.max_comments_count = kwargs.get('max_comments_count', config.CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES)
        self.keywords = kwargs.get('keywords', config.KEYWORDS)

        # Write the effective limits back into the global config module so
        # other components that read config see the same values.
        config.CRAWLER_MAX_NOTES_COUNT = self.max_notes_count
        config.CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES = self.max_comments_count

        utils.logger.info(f"[XiaoHongShuCrawler.__init__] 初始化爬虫实例:")
        utils.logger.info(f"[XiaoHongShuCrawler.__init__] - max_notes_count: {self.max_notes_count}")
        utils.logger.info(f"[XiaoHongShuCrawler.__init__] - max_comments_count: {self.max_comments_count}")
        utils.logger.info(f"[XiaoHongShuCrawler.__init__] - keywords: {self.keywords}")
    async def start(self) -> None:
        """Launch the browser, ensure login, then run the configured crawl mode.

        Sequence: optional proxy setup -> browser launch (+stealth script and
        seed cookie) -> navigate to the index page -> build the API client
        from browser cookies -> login if the client's pong check fails ->
        dispatch on config.CRAWLER_TYPE (search / detail / creator).
        """
        utils.logger.info("[XiaoHongShuCrawler.start] Starting crawler...")
        
        playwright_proxy_format, httpx_proxy_format = None, None
        if config.ENABLE_IP_PROXY:
            utils.logger.info("[XiaoHongShuCrawler.start] Setting up proxy...")
            ip_proxy_pool = await create_ip_pool(
                config.IP_PROXY_POOL_COUNT, enable_validate_ip=True
            )
            ip_proxy_info: IpInfoModel = await ip_proxy_pool.get_proxy()
            playwright_proxy_format, httpx_proxy_format = self.format_proxy_info(
                ip_proxy_info
            )
            utils.logger.info(f"[XiaoHongShuCrawler.start] Proxy configured: {httpx_proxy_format}")

        async with async_playwright() as playwright:
            utils.logger.info("[XiaoHongShuCrawler.start] Launching browser...")
            chromium = playwright.chromium
            # NOTE(review): the browser is launched with proxy=None even when
            # ENABLE_IP_PROXY is on — only the httpx client gets the proxy.
            # Confirm whether that is intentional.
            self.browser_context = await self.launch_browser(
                chromium, None, self.user_agent, headless=config.HEADLESS
            )
            
            utils.logger.info("[XiaoHongShuCrawler.start] Adding stealth script...")
            await self.browser_context.add_init_script(path="libs/stealth.min.js")
            
            utils.logger.info("[XiaoHongShuCrawler.start] Setting up cookies...")
            await self.browser_context.add_cookies([
                {
                    "name": "webId",
                    "value": "xxx123",
                    "domain": ".xiaohongshu.com",
                    "path": "/",
                }
            ])
            
            utils.logger.info("[XiaoHongShuCrawler.start] Creating new page...")
            self.context_page = await self.browser_context.new_page()
            
            # Simplified page-load logic: goto + domcontentloaded, one retry.
            utils.logger.info("[XiaoHongShuCrawler.start] Navigating to index page...")
            try:
                await self.context_page.goto(self.index_url, timeout=30000)
                await self.context_page.wait_for_load_state("domcontentloaded", timeout=30000)
                
                # Validate that the page actually rendered xiaohongshu content.
                content = await self.context_page.content()
                if "小红书" not in content:
                    utils.logger.warning("[XiaoHongShuCrawler.start] Page may not be loaded properly, retrying...")
                    await self.context_page.reload()
                    await self.context_page.wait_for_load_state("domcontentloaded", timeout=30000)
                
            except Exception as e:
                utils.logger.error(f"[XiaoHongShuCrawler.start] Page load error: {str(e)}")
                # Attempt one reload; re-raise if that also fails.
                try:
                    await self.context_page.reload()
                    await self.context_page.wait_for_load_state("domcontentloaded", timeout=30000)
                except Exception as reload_err:
                    utils.logger.error(f"[XiaoHongShuCrawler.start] Page reload failed: {str(reload_err)}")
                    raise

            # Create a client to interact with the xiaohongshu website.
            utils.logger.info("[XiaoHongShuCrawler.start] Creating XHS client...")
            self.xhs_client = await self.create_xhs_client(httpx_proxy_format)
            
            utils.logger.info("[XiaoHongShuCrawler.start] Checking login status...")
            if not await self.xhs_client.pong():
                utils.logger.info("[XiaoHongShuCrawler.start] Login required, starting login process...")
                login_obj = XiaoHongShuLogin(
                    login_type=config.LOGIN_TYPE,
                    login_phone="",  # input your phone number
                    browser_context=self.browser_context,
                    context_page=self.context_page,
                    cookie_str=config.COOKIES,
                )
                await login_obj.begin()
                # Refresh the API client's cookies with the post-login state.
                await self.xhs_client.update_cookies(
                    browser_context=self.browser_context
                )
                utils.logger.info("[XiaoHongShuCrawler.start] Login completed")

            crawler_type_var.set(config.CRAWLER_TYPE)
            utils.logger.info(f"[XiaoHongShuCrawler.start] Starting crawler type: {config.CRAWLER_TYPE}")
            
            if config.CRAWLER_TYPE == "search":
                # Search for notes and retrieve their comment information.
                await self.search()
            elif config.CRAWLER_TYPE == "detail":
                # Get the information and comments of the specified post
                await self.get_specified_notes()
            elif config.CRAWLER_TYPE == "creator":
                # Get creator's information and their notes and comments
                await self.get_creators_and_notes()
            else:
                utils.logger.warning(f"[XiaoHongShuCrawler.start] Unknown crawler type: {config.CRAWLER_TYPE}")

            utils.logger.info("[XiaoHongShuCrawler.start] Crawler finished")

    async def search(self) -> None:
        """Search for notes and retrieve their comment information.

        Iterates the comma-separated keywords, paging through search results
        until self.max_notes_count notes have been processed or results run
        out. For each note: fetch detail, persist it, and (when
        config.ENABLE_GET_COMMENTS is set) fetch up to
        self.max_comments_count comments.
        """
        utils.logger.info(
            "[XiaoHongShuCrawler.search] Begin search xiaohongshu keywords"
        )
        processed_count = 0
        start_page = config.START_PAGE
        
        if not self.keywords:
            utils.logger.error("[XiaoHongShuCrawler.search] No keywords provided")
            return
            
        for keyword in self.keywords.split(","):
            source_keyword_var.set(keyword)
            utils.logger.info(
                f"[XiaoHongShuCrawler.search] Current search keyword: {keyword}"
            )
            page = 1
            search_id = get_search_id()
            
            while processed_count < self.max_notes_count:
                # Skip pages below the configured start page without fetching.
                if page < start_page:
                    page += 1
                    continue

                try:
                    utils.logger.info(
                        f"[XiaoHongShuCrawler.search] Processing page {page}, processed {processed_count}/{self.max_notes_count} notes"
                    )
                    notes_res = await self.xhs_client.get_note_by_keyword(
                        keyword=keyword,
                        search_id=search_id,
                        page=page,
                        sort=(
                            SearchSortType(config.SORT_TYPE)
                            if config.SORT_TYPE != ""
                            else SearchSortType.GENERAL
                        ),
                    )
                    
                    utils.logger.info(f"[XiaoHongShuCrawler.search] Search response: {notes_res}")
                    
                    if not notes_res:
                        utils.logger.warning(f"[XiaoHongShuCrawler.search] Empty response for page {page}")
                        break
                        
                    items = notes_res.get("items", [])
                    if not items:
                        utils.logger.info("[XiaoHongShuCrawler.search] No more items to process")
                        break
                        
                    utils.logger.info(f"[XiaoHongShuCrawler.search] Found {len(items)} items on page {page}")
                    
                    # Drop query-suggestion rows and items without an id.
                    valid_items = []
                    for item in items:
                        if not item:
                            continue
                        if item.get("model_type") not in ("rec_query", "hot_query"):
                            if item.get("id"):
                                valid_items.append(item)
                                
                    utils.logger.info(f"[XiaoHongShuCrawler.search] Found {len(valid_items)} valid items on page {page}")
                    
                    if not valid_items:
                        utils.logger.info("[XiaoHongShuCrawler.search] No valid notes in current page")
                        page += 1
                        continue
                        
                    # Trim the batch so we never exceed the overall note cap.
                    remaining_count = self.max_notes_count - processed_count
                    items_to_process = valid_items[:remaining_count]
                    
                    utils.logger.info(f"[XiaoHongShuCrawler.search] Processing {len(items_to_process)} items")
                    
                    task_list = []
                    semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
                    
                    for post_item in items_to_process:
                        note_id = post_item.get("id")
                        xsec_source = post_item.get("xsec_source", "pc_search")  # default source when absent
                        xsec_token = post_item.get("xsec_token")
                        
                        if note_id and xsec_token:  # only the fields required downstream
                            utils.logger.info(f"[XiaoHongShuCrawler.search] Adding task for note_id: {note_id}")
                            task = self.get_note_detail_async_task(
                                note_id=note_id,
                                xsec_source=xsec_source,
                                xsec_token=xsec_token,
                                semaphore=semaphore,
                            )
                            task_list.append(task)
                        else:
                            utils.logger.warning(f"[XiaoHongShuCrawler.search] Skipping invalid item: {post_item}")
                    
                    if task_list:
                        utils.logger.info(f"[XiaoHongShuCrawler.search] Executing {len(task_list)} tasks")
                        note_details = await asyncio.gather(*task_list)
                        processed_in_batch = 0
                        
                        for note_detail in note_details:
                            if not note_detail:
                                utils.logger.warning("[XiaoHongShuCrawler.search] Received empty note detail, skipping...")
                                continue
                                
                            try:
                                user_info = note_detail.get("user")
                                if not user_info:
                                    utils.logger.warning(f"[XiaoHongShuCrawler.search] No user info in note: {note_detail.get('note_id')}")
                                    continue
                                    
                                # Saving creator info is intentionally disabled for now.
                                # if user_info.get("user_id"):
                                #     await xhs_store.save_creator(user_info.get("user_id"), {"basicInfo": user_info})
                                
                                await xhs_store.update_xhs_note(note_detail)
                                
                                # Fetch comments for this note.
                                # NOTE(review): a fresh Semaphore per call gives no
                                # cross-call limiting, but the await here is serial anyway.
                                if config.ENABLE_GET_COMMENTS:
                                    note_id = note_detail.get("note_id")
                                    xsec_token = note_detail.get("xsec_token")
                                    if note_id and xsec_token:
                                        await self.get_comments(
                                            note_id=note_id,
                                            xsec_token=xsec_token,
                                            semaphore=asyncio.Semaphore(config.MAX_CONCURRENCY_NUM),
                                            max_comments=self.max_comments_count  # per-note comment cap passed in
                                        )
                                
                                processed_count += 1
                                processed_in_batch += 1
                                
                                utils.logger.info(f"[XiaoHongShuCrawler.search] Successfully processed note: {note_detail.get('note_id')}")
                                
                                if processed_count >= self.max_notes_count:
                                    utils.logger.info("[XiaoHongShuCrawler.search] Reached max notes count")
                                    break
                                    
                            except Exception as e:
                                utils.logger.error(f"[XiaoHongShuCrawler.search] Error processing note detail: {str(e)}")
                                utils.logger.error(f"[XiaoHongShuCrawler.search] Note detail: {note_detail}")
                                continue
                                
                        utils.logger.info(f"[XiaoHongShuCrawler.search] Processed {processed_in_batch} notes in current batch")
                        
                        if processed_in_batch == 0:
                            utils.logger.warning("[XiaoHongShuCrawler.search] No notes processed in current batch")
                            break
                            
                    page += 1
                    await asyncio.sleep(1)  # brief delay to avoid hammering the API
                    
                except DataFetchError as e:
                    utils.logger.error(
                        f"[XiaoHongShuCrawler.search] Data fetch error for keyword {keyword}, page {page}: {str(e)}"
                    )
                    break
                except Exception as e:
                    utils.logger.error(
                        f"[XiaoHongShuCrawler.search] Unexpected error for keyword {keyword}, page {page}: {str(e)}"
                    )
                    break
                    
            if processed_count >= self.max_notes_count:
                utils.logger.info("[XiaoHongShuCrawler.search] Reached total max notes count")
                break

    async def get_creators_and_notes(self) -> None:
        """Get creator's notes and retrieve their comment information.

        For each user id in config.XHS_CREATOR_ID_LIST: fetch and persist the
        creator profile, then fetch all of the creator's notes (details are
        persisted via the fetch_creator_notes_detail callback) and finally
        batch-fetch comments for those notes.
        """
        utils.logger.info(
            "[XiaoHongShuCrawler.get_creators_and_notes] Begin get xiaohongshu creators"
        )
        for user_id in config.XHS_CREATOR_ID_LIST:
            try:
                # get creator detail info from web html content
                creator_info: Dict = await self.xhs_client.get_creator_info(
                    user_id=user_id
                )
                
                if not creator_info:
                    utils.logger.warning(f"[XiaoHongShuCrawler.get_creators_and_notes] No creator info found for user_id: {user_id}")
                    continue
                    
                # Reshape the raw creator info into the structure the store expects.
                formatted_creator_info = {
                    "basicInfo": {
                        "user_id": user_id,
                        "nickname": creator_info.get("nickname", ""),
                        "desc": creator_info.get("desc", ""),
                        "gender": creator_info.get("gender", ""),
                        "images": creator_info.get("avatar", ""),
                        "ipLocation": creator_info.get("location", ""),
                    },
                    "interactions": [
                        {"type": "follows", "count": creator_info.get("follows", 0)},
                        {"type": "fans", "count": creator_info.get("fans", 0)},
                        {"type": "interaction", "count": creator_info.get("interaction", 0)}
                    ],
                    "tags": creator_info.get("tags", [])
                }
                
                utils.logger.info(f"[XiaoHongShuCrawler.get_creators_and_notes] Saving creator info for user_id: {user_id}")
                await xhs_store.save_creator(user_id, creator=formatted_creator_info)
                
            except Exception as e:
                utils.logger.error(f"[XiaoHongShuCrawler.get_creators_and_notes] Error processing creator {user_id}: {str(e)}")
                continue

            # When proxy is not enabled, increase the crawling interval
            if config.ENABLE_IP_PROXY:
                crawl_interval = random.random()
            else:
                crawl_interval = random.uniform(1, config.CRAWLER_MAX_SLEEP_SEC)
                
            try:
                # Get all note information of the creator
                all_notes_list = await self.xhs_client.get_all_notes_by_creator(
                    user_id=user_id,
                    crawl_interval=crawl_interval,
                    callback=self.fetch_creator_notes_detail,
                )

                if not all_notes_list:
                    utils.logger.warning(f"[XiaoHongShuCrawler.get_creators_and_notes] No notes found for user_id: {user_id}")
                    continue

                # Collect only notes carrying both id and token; the two lists
                # stay index-aligned for batch_get_note_comments.
                note_ids = []
                xsec_tokens = []
                for note_item in all_notes_list:
                    note_id = note_item.get("note_id")
                    xsec_token = note_item.get("xsec_token")
                    if note_id and xsec_token:
                        note_ids.append(note_id)
                        xsec_tokens.append(xsec_token)
                        
                if note_ids:
                    await self.batch_get_note_comments(note_ids, xsec_tokens)
                else:
                    utils.logger.warning(f"[XiaoHongShuCrawler.get_creators_and_notes] No valid notes found for user_id: {user_id}")
                    
            except Exception as e:
                utils.logger.error(f"[XiaoHongShuCrawler.get_creators_and_notes] Error getting notes for creator {user_id}: {str(e)}")
                continue

    async def fetch_creator_notes_detail(self, note_list: List[Dict]):
        """Fetch detail for every note in *note_list* concurrently and persist
        each non-empty result to the store."""
        limiter = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)

        detail_coros = []
        for item in note_list:
            detail_coros.append(
                self.get_note_detail_async_task(
                    note_id=item.get("note_id"),
                    xsec_source=item.get("xsec_source"),
                    xsec_token=item.get("xsec_token"),
                    semaphore=limiter,
                )
            )

        for detail in await asyncio.gather(*detail_coros):
            if not detail:
                continue
            await xhs_store.update_xhs_note(detail)

    async def get_specified_notes(self):
        """Get the information and comments of the specified posts.

        Each URL in config.XHS_SPECIFIED_NOTE_URL_LIST must carry note_id,
        xsec_source and xsec_token. Details are fetched concurrently,
        persisted, and then comments are batch-fetched for the notes that
        resolved successfully.
        """
        get_note_detail_task_list = []
        for full_note_url in config.XHS_SPECIFIED_NOTE_URL_LIST:
            note_url_info: NoteUrlInfo = parse_note_info_from_note_url(full_note_url)
            utils.logger.info(
                f"[XiaoHongShuCrawler.get_specified_notes] Parse note url info: {note_url_info}"
            )
            crawler_task = self.get_note_detail_async_task(
                note_id=note_url_info.note_id,
                xsec_source=note_url_info.xsec_source,
                xsec_token=note_url_info.xsec_token,
                semaphore=asyncio.Semaphore(config.MAX_CONCURRENCY_NUM),
            )
            get_note_detail_task_list.append(crawler_task)

        # note ids and tokens stay index-aligned for batch_get_note_comments.
        need_get_comment_note_ids = []
        xsec_tokens = []
        note_details = await asyncio.gather(*get_note_detail_task_list)
        for note_detail in note_details:
            if note_detail:
                need_get_comment_note_ids.append(note_detail.get("note_id", ""))
                xsec_tokens.append(note_detail.get("xsec_token", ""))
                await xhs_store.update_xhs_note(note_detail)
        await self.batch_get_note_comments(need_get_comment_note_ids, xsec_tokens)

    async def get_note_detail_async_task(
        self,
        note_id: str,
        xsec_source: str,
        xsec_token: str,
        semaphore: asyncio.Semaphore,
    ) -> Optional[Dict]:
        """Get note detail with a three-step fallback chain.

        Order: HTML endpoint with cookies -> HTML endpoint without cookies ->
        JSON API. The xsec_token/xsec_source are merged into the returned
        dict so downstream comment fetching can reuse them.

        Args:
            note_id: note identifier.
            xsec_source: source tag accompanying the note link.
            xsec_token: access token accompanying the note link.
            semaphore: concurrency limiter shared across sibling tasks.

        Returns:
            Dict: note detail, or None on any failure.
        """
        async with semaphore:
            # When proxy is not enabled, increase the crawling interval
            if config.ENABLE_IP_PROXY:
                crawl_interval = random.random()
            else:
                crawl_interval = random.uniform(1, config.CRAWLER_MAX_SLEEP_SEC)
                
            try:
                # Step 1: try the web-page HTML endpoint with cookies attached.
                note_detail_from_html: Optional[Dict] = (
                    await self.xhs_client.get_note_by_id_from_html(
                        note_id, xsec_source, xsec_token, enable_cookie=True
                    )
                )
                
                # Async sleep to pace requests.
                await asyncio.sleep(crawl_interval)
                
                if not note_detail_from_html:
                    # Step 2: retry the HTML endpoint without cookies.
                    note_detail_from_html = (
                        await self.xhs_client.get_note_by_id_from_html(
                            note_id, xsec_source, xsec_token, enable_cookie=False
                        )
                    )
                    
                if not note_detail_from_html:
                    # Step 3: fall back to the JSON API.
                    note_detail_from_api: Optional[Dict] = (
                        await self.xhs_client.get_note_by_id(
                            note_id, xsec_source, xsec_token
                        )
                    )
                    if not note_detail_from_api:
                        utils.logger.error(
                            f"[XiaoHongShuCrawler.get_note_detail_async_task] Get note detail error, note_id: {note_id}"
                        )
                        return None
                        
                # `or` short-circuits, so note_detail_from_api is only read on
                # the branch where it was actually assigned above.
                note_detail = note_detail_from_html or note_detail_from_api
                if note_detail:
                    note_detail.update(
                        {"xsec_token": xsec_token, "xsec_source": xsec_source}
                    )
                    return note_detail
                    
            except DataFetchError as ex:
                utils.logger.error(
                    f"[XiaoHongShuCrawler.get_note_detail_async_task] Get note detail error: {ex}"
                )
                return None
            except KeyError as ex:
                utils.logger.error(
                    f"[XiaoHongShuCrawler.get_note_detail_async_task] have not fund note detail note_id:{note_id}, err: {ex}"
                )
                return None
            except Exception as ex:
                utils.logger.error(
                    f"[XiaoHongShuCrawler.get_note_detail_async_task] Unexpected error for note_id:{note_id}, err: {ex}"
                )
                return None
                
            return None

    async def batch_get_note_comments(
        self, note_list: List[str], xsec_tokens: List[str]
    ):
        """Batch get note comments.

        Args:
            note_list: note ids to fetch comments for.
            xsec_tokens: xsec tokens aligned index-for-index with note_list.

        No-op when config.ENABLE_GET_COMMENTS is disabled.
        """
        if not config.ENABLE_GET_COMMENTS:
            utils.logger.info(
                f"[XiaoHongShuCrawler.batch_get_note_comments] Crawling comment mode is not enabled"
            )
            return

        utils.logger.info(
            f"[XiaoHongShuCrawler.batch_get_note_comments] Begin batch get note comments, note list: {note_list}"
        )
        semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
        task_list: List[Task] = []
        for index, note_id in enumerate(note_list):
            task = asyncio.create_task(
                self.get_comments(
                    note_id=note_id,
                    xsec_token=xsec_tokens[index],
                    semaphore=semaphore,
                    # BUG FIX: get_comments declares max_comments as a required
                    # parameter; omitting it raised TypeError and aborted every
                    # batch comment crawl (detail and creator modes).
                    max_comments=self.max_comments_count,
                ),
                name=note_id,
            )
            task_list.append(task)
        await asyncio.gather(*task_list)

    async def get_comments(
        self,
        note_id: str,
        xsec_token: str,
        semaphore: asyncio.Semaphore,
        max_comments: Optional[int] = None,
    ):
        """Get note comments, bounded by a per-note quantity limit.

        Args:
            note_id: note identifier.
            xsec_token: access token for the note.
            semaphore: concurrency limiter shared across sibling tasks.
            max_comments: per-note comment cap; defaults to the crawler
                instance's max_comments_count when omitted (backward-compatible
                default so existing three-argument callers keep working).
        """
        if max_comments is None:
            max_comments = self.max_comments_count
        async with semaphore:
            utils.logger.info(
                f"[XiaoHongShuCrawler.get_comments] Begin get note id comments {note_id}"
            )
            # When proxy is not enabled, increase the crawling interval
            if config.ENABLE_IP_PROXY:
                crawl_interval = random.random()
            else:
                crawl_interval = random.uniform(1, config.CRAWLER_MAX_SLEEP_SEC)
            await self.xhs_client.get_note_all_comments(
                note_id=note_id,
                xsec_token=xsec_token,
                crawl_interval=crawl_interval,
                callback=xhs_store.batch_update_xhs_note_comments,
                max_count=max_comments,
            )

    @staticmethod
    def format_proxy_info(
        ip_proxy_info: IpInfoModel,
    ) -> Tuple[Optional[Dict], Optional[Dict]]:
        """Build the Playwright-style and httpx-style proxy dicts for one IP record."""
        proto = ip_proxy_info.protocol
        host_port = f"{ip_proxy_info.ip}:{ip_proxy_info.port}"
        credentials = f"{ip_proxy_info.user}:{ip_proxy_info.password}"

        playwright_proxy = {
            "server": f"{proto}{host_port}",
            "username": ip_proxy_info.user,
            "password": ip_proxy_info.password,
        }
        httpx_proxy = {proto: f"http://{credentials}@{host_port}"}
        return playwright_proxy, httpx_proxy

    async def create_xhs_client(self, httpx_proxy: Optional[Dict]) -> XiaoHongShuClient:
        """Create the xhs API client, seeding it with the browser's cookies.

        Args:
            httpx_proxy: httpx-style proxy mapping produced by
                format_proxy_info, or None when no proxy is used.
                (Annotation corrected from Optional[str]: callers pass a dict.)

        Returns:
            A configured XiaoHongShuClient bound to the current page.
        """
        utils.logger.info(
            "[XiaoHongShuCrawler.create_xhs_client] Begin create xiaohongshu API client ..."
        )
        cookie_str, cookie_dict = utils.convert_cookies(
            await self.browser_context.cookies()
        )
        xhs_client_obj = XiaoHongShuClient(
            proxies=httpx_proxy,
            headers={
                "User-Agent": self.user_agent,
                "Cookie": cookie_str,
                "Origin": "https://www.xiaohongshu.com",
                "Referer": "https://www.xiaohongshu.com",
                "Content-Type": "application/json;charset=UTF-8",
            },
            playwright_page=self.context_page,
            cookie_dict=cookie_dict,
        )
        return xhs_client_obj

    async def launch_browser(
        self,
        chromium: BrowserType,
        playwright_proxy: Optional[Dict],
        user_agent: Optional[str],
        headless: bool = True,
    ) -> BrowserContext:
        """Launch Chromium and return a browser context.

        When config.SAVE_LOGIN_STATE is set, a persistent on-disk profile
        under ./browser_data is used so login state survives restarts;
        otherwise a throwaway browser with a fresh context is created.
        """
        utils.logger.info(
            "[XiaoHongShuCrawler.launch_browser] Begin create browser context ..."
        )
        viewport = {"width": 1920, "height": 1080}

        if not config.SAVE_LOGIN_STATE:
            browser = await chromium.launch(headless=headless, proxy=playwright_proxy)  # type: ignore
            return await browser.new_context(viewport=viewport, user_agent=user_agent)

        # feat issue #14: persist login state to avoid logging in every run.
        user_data_dir = os.path.join(
            os.getcwd(), "browser_data", config.USER_DATA_DIR % config.PLATFORM
        )  # type: ignore
        return await chromium.launch_persistent_context(
            user_data_dir=user_data_dir,
            accept_downloads=True,
            headless=headless,
            proxy=playwright_proxy,  # type: ignore
            viewport=viewport,
            user_agent=user_agent,
        )

    async def close(self):
        """Close the page, browser context, and API client.

        Each resource is released independently so the absence or failure of
        one never prevents cleanup of the others. (Previously the XHS client
        was only closed inside the browser-context branch, so it leaked
        whenever the browser context was never created.)
        """
        try:
            # Close the page first so the context can shut down cleanly.
            if getattr(self, 'context_page', None):
                try:
                    await self.context_page.close()
                    utils.logger.info("[XiaoHongShuCrawler.close] Page closed successfully...")
                except Exception as page_err:
                    utils.logger.warning(f"[XiaoHongShuCrawler.close] Non-fatal error while closing page: {str(page_err)}")
                finally:
                    self.context_page = None

            if getattr(self, 'browser_context', None):
                try:
                    await self.browser_context.close()
                    utils.logger.info("[XiaoHongShuCrawler.close] Browser context closed successfully...")
                except Exception as context_err:
                    utils.logger.warning(f"[XiaoHongShuCrawler.close] Non-fatal error while closing browser context: {str(context_err)}")
                finally:
                    self.browser_context = None
            else:
                utils.logger.info("[XiaoHongShuCrawler.close] Browser context was not initialized")

            # Close the HTTP client even when the browser never launched.
            if getattr(self, 'xhs_client', None):
                try:
                    await self.xhs_client.close()
                    utils.logger.info("[XiaoHongShuCrawler.close] XHS client closed successfully...")
                except Exception as client_err:
                    utils.logger.warning(f"[XiaoHongShuCrawler.close] Non-fatal error while closing XHS client: {str(client_err)}")
                finally:
                    self.xhs_client = None

        except Exception as e:
            utils.logger.error(f"[XiaoHongShuCrawler.close] Error during cleanup: {str(e)}")
            import traceback
            utils.logger.error(f"[XiaoHongShuCrawler.close] Error traceback: {traceback.format_exc()}")
            
    async def __aenter__(self):
        """Async context manager entry: return the crawler instance itself."""
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit: release browser and client resources."""
        await self.close()

    async def get_notice_media(self, note_detail: Dict):
        """Download a note's images and video when media crawling is enabled."""
        if config.ENABLE_GET_IMAGES:
            await self.get_note_images(note_detail)
            await self.get_notice_video(note_detail)
            return
        utils.logger.info(
            f"[XiaoHongShuCrawler.get_notice_media] Crawling image mode is not enabled"
        )

    async def get_note_images(self, note_item: Dict):
        """Download every image of a note; prefer get_notice_media as the entry point.

        :param note_item: note detail dict containing an "image_list".
        :return: None
        """
        if not config.ENABLE_GET_IMAGES:
            return
        note_id = note_item.get("note_id")
        image_list: List[Dict] = note_item.get("image_list", [])

        # Point each record's "url" at the default-quality URL when present.
        for image_info in image_list:
            if image_info.get("url_default") != "":
                image_info.update({"url": image_info.get("url_default")})

        if not image_list:
            return

        saved_index = 0
        for image_info in image_list:
            image_url = image_info.get("url")
            if not image_url:
                continue
            payload = await self.xhs_client.get_note_media(image_url)
            if payload is None:
                continue
            # Files are numbered by successful download order.
            file_name = f"{saved_index}.jpg"
            saved_index += 1
            await xhs_store.update_xhs_note_image(note_id, payload, file_name)

    async def get_notice_video(self, note_item: Dict):
        """Download a note's video streams; prefer get_notice_media as the entry point.

        :param note_item: note detail dict.
        :return: None
        """
        if not config.ENABLE_GET_IMAGES:
            return
        note_id = note_item.get("note_id")

        video_urls = xhs_store.get_video_url_arr(note_item)
        if not video_urls:
            return

        saved_index = 0
        for video_url in video_urls:
            payload = await self.xhs_client.get_note_media(video_url)
            if payload is None:
                continue
            # Files are numbered by successful download order.
            file_name = f"{saved_index}.mp4"
            saved_index += 1
            await xhs_store.update_xhs_note_image(note_id, payload, file_name)

    async def wait_for_page_load(self, timeout_ms=30000):
        """Wait until the current page is fully loaded and validates as xiaohongshu.

        Returns True when both load states are reached and the HTML contains
        the expected marker text; False on timeout or validation failure.
        """
        try:
            for load_state in ("domcontentloaded", "networkidle"):
                await self.context_page.wait_for_load_state(load_state, timeout=timeout_ms)

            html = await self.context_page.content()
            if "小红书" in html:
                return True

            utils.logger.warning("[XiaoHongShuCrawler.wait_for_page_load] Page content validation failed")
            return False

        except Exception as e:
            utils.logger.error(f"[XiaoHongShuCrawler.wait_for_page_load] Error: {str(e)}")
            return False

    async def navigate_with_retry(self, url, max_retries=3):
        """Navigate to *url*, retrying up to *max_retries* times.

        Returns True once the page loads and validates; returns False when
        every attempt loads but fails validation. Re-raises the navigation
        error if goto itself fails on the final attempt.
        """
        attempt = 0
        while attempt < max_retries:
            try:
                await self.context_page.goto(url, timeout=30000)
                if await self.wait_for_page_load():
                    return True
                utils.logger.warning(f"[XiaoHongShuCrawler.navigate_with_retry] Attempt {attempt + 1} failed, retrying...")
                await asyncio.sleep(2)  # back off before the next attempt
            except Exception as e:
                utils.logger.error(f"[XiaoHongShuCrawler.navigate_with_retry] Error on attempt {attempt + 1}: {str(e)}")
                if attempt >= max_retries - 1:
                    raise
                await asyncio.sleep(2)
            attempt += 1

        return False
