import re
from urllib.parse import urlparse, parse_qs, urlencode, urlunparse
from collections import deque

class URLManager:
    """
    URL manager for the crawler.

    Maintains a FIFO queue of URLs waiting to be crawled, de-duplicates
    against both queued and already-visited URLs, and provides helpers for
    xiaohongshu.com URL parsing, generation, and pagination.
    """

    def __init__(self):
        # FIFO queue of URLs waiting to be crawled (deque: O(1) popleft).
        self.url_queue = deque()
        # URLs already handed out by get_next_url(); never re-queued.
        self.visited_urls = set()
        # Mirror of url_queue contents for O(1) duplicate checks
        # (membership tests on a deque are O(n)).
        self._queued_urls = set()
        # Base domain accepted by validate_url().
        self.base_domain = "xiaohongshu.com"
        # Base URL of the search-results page.
        self.search_base_url = "https://www.xiaohongshu.com/search_result"
        # Base URL of the note (post) detail page.
        self.note_base_url = "https://www.xiaohongshu.com/explore"

    def add_url(self, url):
        """
        Add a URL to the crawl queue.

        The URL is rejected when it is falsy, was already visited, or is
        already waiting in the queue (previously the same URL could be
        enqueued multiple times because only visited_urls was checked).

        Args:
            url (str): URL to enqueue.

        Returns:
            bool: True if the URL was enqueued, False otherwise.
        """
        if url and url not in self.visited_urls and url not in self._queued_urls:
            self.url_queue.append(url)
            self._queued_urls.add(url)
            return True
        return False

    def add_urls(self, urls):
        """
        Add several URLs to the queue, silently skipping duplicates.

        Args:
            urls (list): URLs to enqueue.
        """
        for url in urls:
            self.add_url(url)

    def get_next_url(self):
        """
        Pop the next URL to process and mark it as visited.

        Returns:
            str: The next URL, or None when the queue is empty.
        """
        if not self.url_queue:
            return None
        url = self.url_queue.popleft()
        self._queued_urls.discard(url)
        self.visited_urls.add(url)
        return url

    def has_next_url(self):
        """
        Check whether any URLs are still waiting in the queue.

        Returns:
            bool: True if at least one URL is queued.
        """
        return len(self.url_queue) > 0

    def extract_note_id(self, url):
        """
        Extract the note ID from a xiaohongshu note URL.

        Matches the path segment following "explore/" or "search_result/".

        Args:
            url (str): Note URL (query parameters are ignored).

        Returns:
            str: The note ID, or None if it cannot be extracted.
        """
        try:
            # \w never matches '?' or '/', so the captured ID can never
            # contain query-string text (the old post-hoc '?' split was
            # unreachable dead code).
            match = re.search(r'(?:explore|search_result)/(\w+)', url)
            return match.group(1) if match else None
        except Exception as e:
            print(f"提取笔记ID出错: {e}")
            return None

    def generate_search_url(self, keyword, page=1, page_size=20):
        """
        Build a search-results URL.

        The xiaohongshu search URL has the form
        /search_result?keyword=xxx&page=yy; urlencode percent-encodes
        the keyword.

        Args:
            keyword (str): Search keyword.
            page (int): 1-based page number.
            page_size (int): Results per page.

        Returns:
            str: The search URL.
        """
        params = {
            'keyword': keyword,
            'page': page,
            'page_size': page_size,
        }
        return f"{self.search_base_url}?{urlencode(params)}"

    def generate_note_url(self, note_id):
        """
        Build a note detail-page URL.

        Args:
            note_id (str): Note ID.

        Returns:
            str: The note URL.
        """
        return f"{self.note_base_url}/{note_id}"

    def get_next_page_url(self, current_url):
        """
        Build the URL of the page following *current_url*.

        Only search-result pages are paginated; any other URL yields None.

        Args:
            current_url (str): URL of the current page.

        Returns:
            str: The next page's URL, or None when the URL is not a
                paginated search page or parsing fails.
        """
        try:
            parsed_url = urlparse(current_url)
            query_params = parse_qs(parsed_url.query)

            # Check the path component, not the whole URL, so that
            # 'search_result' appearing inside a query value does not
            # falsely trigger pagination.
            if 'search_result' in parsed_url.path:
                # Current page number; pages are 1-based, missing => 1.
                current_page = int(query_params.get('page', [1])[0])
                query_params['page'] = [str(current_page + 1)]

                # Rebuild the URL with the incremented page number.
                new_query = urlencode(query_params, doseq=True)
                return urlunparse((parsed_url.scheme, parsed_url.netloc,
                                   parsed_url.path, parsed_url.params,
                                   new_query, parsed_url.fragment))

            return None
        except Exception as e:
            print(f"获取下一页URL出错: {e}")
            return None

    def validate_url(self, url):
        """
        Check whether a URL belongs to xiaohongshu.com.

        Requires the hostname to be the base domain itself or one of its
        subdomains. A plain substring test on netloc would also accept
        look-alike hosts such as "xiaohongshu.com.evil.com".

        Args:
            url (str): URL to validate.

        Returns:
            bool: True for a valid xiaohongshu URL.
        """
        try:
            # .hostname strips any port and lowercases the host.
            host = urlparse(url).hostname or ""
            return host == self.base_domain or host.endswith("." + self.base_domain)
        except Exception:
            return False

    def get_visited_count(self):
        """
        Number of URLs already handed out for processing.

        Returns:
            int: Count of visited URLs.
        """
        return len(self.visited_urls)

    def get_queue_count(self):
        """
        Number of URLs still waiting in the queue.

        Returns:
            int: Count of queued URLs.
        """
        return len(self.url_queue)

    def reset(self):
        """
        Clear all state: the pending queue, its dedup mirror, and the
        visited-URL set.
        """
        self.url_queue.clear()
        self._queued_urls.clear()
        self.visited_urls.clear()

# Manual smoke test: runs only when this module is executed directly.
if __name__ == "__main__":
    manager = URLManager()

    # Queue one note URL and one search URL.
    manager.add_url("https://www.xiaohongshu.com/explore/68466bed00000000210052ab")
    manager.add_url("https://www.xiaohongshu.com/search_result?keyword=od药&page=1")

    # Note-ID extraction from a URL carrying query parameters.
    sample_url = (
        "https://www.xiaohongshu.com/explore/68466bed00000000210052ab"
        "?xsec_token=ABUovRXRNJwaMgDWz5M081lXniumlzApIMQEorqv_8Aco=&xsec_source=pc_search"
    )
    note_id = manager.extract_note_id(sample_url)
    print(f"提取的笔记ID: {note_id}")

    # Rebuild a clean note URL from the extracted ID.
    note_url = manager.generate_note_url(note_id)
    print(f"生成的笔记URL: {note_url}")

    # Search-URL generation.
    search_url = manager.generate_search_url("旅行", page=2)
    print(f"生成的搜索URL: {search_url}")

    # Pagination: derive the following page from the search URL.
    next_page_url = manager.get_next_page_url(search_url)
    print(f"下一页URL: {next_page_url}")

    # Domain validation: one xiaohongshu URL, one foreign URL.
    print(f"是否为有效URL: {manager.validate_url(sample_url)}")
    print(f"是否为有效URL: {manager.validate_url('https://www.google.com')}")