from bs4 import BeautifulSoup
import json
import re
from typing import Dict, List, Optional, Any
import logging

class XiaohongshuParser:
    """
    Parser for Xiaohongshu (RED / Little Red Book) pages.

    Extracts note details, author info, images, and comments from raw HTML.
    It first tries to mine the JSON state objects embedded in ``<script>``
    tags (``window.__INITIAL_STATE__`` etc.) and, failing that, falls back
    to heuristic HTML-element parsing.
    """

    def __init__(self):
        # NOTE(review): basicConfig is a process-wide side effect; kept to
        # preserve existing behaviour for current callers.
        logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        self.logger = logging.getLogger('XiaohongshuParser')

    def parse_note_page(self, html_content: str) -> Dict[str, Any]:
        """
        Parse a Xiaohongshu note page.

        Args:
            html_content (str): raw HTML of the note page.

        Returns:
            dict: {'success', 'note_info', 'user_info', 'images',
                   'comments', 'error'}; ``success`` is False and ``error``
                   is set when nothing could be extracted.
        """
        try:
            soup = BeautifulSoup(html_content, 'html.parser')
            result: Dict[str, Any] = {
                'success': False,
                'note_info': {},
                'user_info': {},
                'images': [],
                'comments': [],
                'error': None
            }

            # Preferred path: structured JSON embedded in page scripts.
            data = self._extract_data_from_js(soup)

            if data:
                result['note_info'] = self._extract_note_info(data)
                result['user_info'] = self._extract_user_info(data)
                result['images'] = self._extract_images(data)
                result['comments'] = self._extract_comments(data)
                result['success'] = True
            else:
                # Fallback path: scrape visible HTML elements.
                note_info = self._parse_note_info_from_html(soup)
                user_info = self._parse_user_info_from_html(soup)
                images = self._parse_images_from_html(soup)

                if note_info or user_info:
                    result['note_info'] = note_info
                    result['user_info'] = user_info
                    result['images'] = images
                    result['success'] = True
                else:
                    result['error'] = "无法解析页面数据"
                    self.logger.warning("无法通过JS或HTML解析页面数据")

            return result

        except Exception as e:
            self.logger.error(f"解析笔记页面出错: {str(e)}")
            # Return a clean error payload rather than a half-filled result.
            return {
                'success': False,
                'note_info': {},
                'user_info': {},
                'images': [],
                'comments': [],
                'error': str(e)
            }

    def _extract_data_from_js(self, soup: "BeautifulSoup") -> Optional[Dict]:
        """
        Extract the embedded JSON state object from page ``<script>`` tags.

        Args:
            soup (BeautifulSoup): parsed page.

        Returns:
            dict | None: first JSON object that parses successfully,
            or None when nothing matches.
        """
        # (pattern, capture-group index, label used in the warning message).
        # NOTE(review): the lazy `\{.*?\};` match breaks on JSON that contains
        # the literal "};" inside a string value — acceptable heuristic here.
        js_patterns = [
            (r'window\.__INITIAL_STATE__\s*=\s*(\{.*?\});', 1, '__INITIAL_STATE__'),
            (r'window\._sharedData\s*=\s*(\{.*?\});', 1, '_sharedData'),
            (r'\b(note|笔记|post)\s*:\s*(\{.*?\})', 2, 'note数据'),
        ]
        try:
            for script in soup.find_all('script'):
                script_content = script.string
                if not script_content:
                    continue
                for pattern, group_idx, label in js_patterns:
                    found = re.search(pattern, script_content, re.DOTALL)
                    if found:
                        try:
                            return json.loads(found.group(group_idx))
                        except json.JSONDecodeError:
                            self.logger.warning(f"解析{label}时JSON格式错误")
            return None
        except Exception as e:
            self.logger.error(f"从JS中提取数据出错: {str(e)}")
            return None

    def _extract_note_info(self, data: Dict) -> Dict[str, Any]:
        """
        Extract note information from a JSON payload.

        Args:
            data (dict): page data (possibly a wrapper around the note).

        Returns:
            dict: note fields; empty dict when nothing was found.
        """
        note_info: Dict[str, Any] = {}

        try:
            # Path 1: a 'note' or 'post' object carries the fields directly.
            for key in ('note', 'post'):
                if key in data:
                    src = data[key]
                    note_info.update({
                        'id': src.get('id', ''),
                        'title': src.get('title', ''),
                        'content': src.get('content', ''),
                        'likes_count': src.get('likes', 0),
                        'comments_count': src.get('comments', 0),
                        'collections_count': src.get('collections', 0),
                        'share_count': src.get('shareCount', 0),
                        'publish_time': src.get('time', ''),
                        'topic': src.get('topic', ''),
                        'tags': src.get('tags', []),
                        'location': src.get('location', ''),
                        'views_count': src.get('viewsCount', 0)
                    })
                    break
            else:
                # Path 2: recurse into known wrapper containers.
                for nested in ('data', 'noteDetail', 'notePage', 'noteCollection'):
                    if nested in data:
                        return self._extract_note_info(data[nested])

            # Last resort: pull whatever generic fields are present.
            if not note_info and isinstance(data, dict):
                note_info['id'] = data.get('id', '') or data.get('noteId', '') or data.get('note_id', '')
                # Guard against an explicit None under 'text' before slicing;
                # first 50 chars of the body double as a title.
                note_info['title'] = data.get('title', '') or (data.get('text') or '')[:50]
                note_info['content'] = data.get('content', '') or data.get('text', '')

                # Like/comment counters appear under several possible keys;
                # take the first one present.
                for field in ('likesCount', 'likeCount', 'likes', 'likedCount'):
                    if field in data:
                        note_info['likes_count'] = data[field]
                        break
                for field in ('commentsCount', 'commentCount', 'comments'):
                    if field in data:
                        note_info['comments_count'] = data[field]
                        break

        except Exception as e:
            self.logger.error(f"提取笔记信息出错: {str(e)}")

        return note_info

    def _extract_user_info(self, data: Dict) -> Dict[str, Any]:
        """
        Extract author/user information from a JSON payload.

        Args:
            data (dict): page data (possibly a wrapper around the user).

        Returns:
            dict: user fields; empty dict when nothing was found.
        """
        user_info: Dict[str, Any] = {}

        try:
            # Path 1: a 'user' or 'author' object carries the fields directly.
            for key in ('user', 'author'):
                if key in data:
                    src = data[key]
                    user_info.update({
                        'id': src.get('id', ''),
                        'nickname': src.get('nickname', ''),
                        'avatar': src.get('avatar', ''),
                        'description': src.get('description', ''),
                        'follower_count': src.get('followerCount', 0),
                        'following_count': src.get('followingCount', 0),
                        'notes_count': src.get('notesCount', 0),
                        'liked_count': src.get('likedCount', 0),
                        'is_following': src.get('isFollowing', False)
                    })
                    break
            else:
                # Path 2: recurse into known wrapper containers.
                for nested in ('data', 'noteDetail', 'notePage'):
                    if nested in data:
                        return self._extract_user_info(data[nested])

            # Last resort: generic flat keys.
            if not user_info and isinstance(data, dict):
                user_info['id'] = data.get('userId', '') or data.get('user_id', '')
                user_info['nickname'] = data.get('nickname', '') or data.get('username', '')
                user_info['avatar'] = data.get('avatar', '') or data.get('avatarUrl', '')

        except Exception as e:
            self.logger.error(f"提取用户信息出错: {str(e)}")

        return user_info

    def _extract_images(self, data: Dict) -> List[Dict[str, str]]:
        """
        Extract image records from a JSON payload.

        Args:
            data (dict): page data (possibly a wrapper around the images).

        Returns:
            list: image dicts with id/url/width/height/alt.
        """
        images: List[Dict[str, str]] = []

        try:
            # Path 1: a direct 'images' list (entries may be dicts or URLs).
            if 'images' in data and isinstance(data['images'], list):
                for idx, img_data in enumerate(data['images']):
                    if isinstance(img_data, dict):
                        images.append({
                            'id': img_data.get('id', f'image_{idx}'),
                            'url': img_data.get('url', '') or img_data.get('src', ''),
                            'width': img_data.get('width', 0),
                            'height': img_data.get('height', 0),
                            'alt': img_data.get('alt', '')
                        })
                    elif isinstance(img_data, str):
                        images.append({
                            'id': f'image_{idx}',
                            'url': img_data,
                            'width': 0,
                            'height': 0,
                            'alt': ''
                        })
            else:
                # Path 2: recurse into known wrapper containers.
                for nested in ('data', 'noteDetail', 'notePage', 'note'):
                    if nested in data:
                        return self._extract_images(data[nested])

        except Exception as e:
            self.logger.error(f"提取图片信息出错: {str(e)}")

        return images

    def _extract_comments(self, data: Dict) -> List[Dict[str, Any]]:
        """
        Extract comment records from a JSON payload.

        Args:
            data (dict): page data (possibly a wrapper around the comments).

        Returns:
            list: comment dicts (id, content, likes_count, created_at,
            user, parent_id).
        """
        comments: List[Dict[str, Any]] = []

        try:
            # Path 1: a direct 'comments' list.
            if 'comments' in data and isinstance(data['comments'], list):
                for comment_data in data['comments']:
                    if not isinstance(comment_data, dict):
                        continue
                    # 'user' may be present but None; normalize to a dict so
                    # a single bad comment cannot abort the whole extraction.
                    user = comment_data.get('user') or {}
                    comments.append({
                        'id': comment_data.get('id', ''),
                        'content': comment_data.get('content', ''),
                        'likes_count': comment_data.get('likesCount', 0),
                        'created_at': comment_data.get('createdAt', '') or comment_data.get('time', ''),
                        'user': {
                            'id': comment_data.get('userId', '') or user.get('id', ''),
                            'nickname': comment_data.get('nickname', '') or user.get('nickname', ''),
                            'avatar': comment_data.get('avatar', '') or user.get('avatar', '')
                        },
                        'parent_id': comment_data.get('parentId', '')
                    })
            else:
                # Path 2: recurse into known wrapper containers.
                for nested in ('data', 'noteDetail', 'notePage'):
                    if nested in data:
                        return self._extract_comments(data[nested])

        except Exception as e:
            self.logger.error(f"提取评论信息出错: {str(e)}")

        return comments

    def _parse_note_info_from_html(self, soup: "BeautifulSoup") -> Dict[str, Any]:
        """
        Parse note information from HTML elements (fallback method).

        Args:
            soup (BeautifulSoup): parsed page.

        Returns:
            dict: whatever note fields could be scraped.
        """
        note_info: Dict[str, Any] = {}

        try:
            # Title: first <h1> on the page.
            title_element = soup.find('h1')
            if title_element:
                note_info['title'] = title_element.text.strip()

            # Body text: a content-looking <div>, else the first <p>.
            content_element = soup.find('div', class_=re.compile(r'content|note-content|main-text'))
            if not content_element:
                content_element = soup.find('p')
            if content_element:
                note_info['content'] = content_element.text.strip()

            # Engagement counters: <span> elements matched by class keyword.
            counter_specs = [
                ('likes_count', r'like|heart|digg'),
                ('comments_count', r'comment|chat|discuss'),
                ('collections_count', r'collect|bookmark|save'),
                ('share_count', r'share|forward'),
            ]
            for field, class_pattern in counter_specs:
                element = soup.find('span', class_=re.compile(class_pattern))
                if element:
                    note_info[field] = self._extract_number(element.text.strip())

        except Exception as e:
            self.logger.error(f"从HTML解析笔记信息出错: {str(e)}")

        return note_info

    def _parse_user_info_from_html(self, soup: "BeautifulSoup") -> Dict[str, Any]:
        """
        Parse user information from HTML elements (fallback method).

        Args:
            soup (BeautifulSoup): parsed page.

        Returns:
            dict: whatever user fields could be scraped.
        """
        user_info: Dict[str, Any] = {}

        try:
            # Nickname.
            nickname_element = soup.find('span', class_=re.compile(r'nickname|username|author'))
            if nickname_element:
                user_info['nickname'] = nickname_element.text.strip()

            # Avatar image URL.
            avatar_element = soup.find('img', class_=re.compile(r'avatar|head|user-image'))
            if avatar_element and avatar_element.has_attr('src'):
                user_info['avatar'] = avatar_element['src']

            # Profile description / bio.
            desc_element = soup.find('div', class_=re.compile(r'description|bio|intro'))
            if desc_element:
                user_info['description'] = desc_element.text.strip()

        except Exception as e:
            self.logger.error(f"从HTML解析用户信息出错: {str(e)}")

        return user_info

    def _parse_images_from_html(self, soup: "BeautifulSoup") -> List[Dict[str, str]]:
        """
        Parse image records from HTML elements (fallback method).

        Args:
            soup (BeautifulSoup): parsed page.

        Returns:
            list: image dicts for meaningful content images (icons,
            avatars, and tiny images are skipped).
        """
        images: List[Dict[str, str]] = []

        try:
            for idx, img in enumerate(soup.find_all('img')):
                # width/height attributes may be non-numeric (e.g. "100px");
                # _extract_number tolerates that instead of raising.
                raw_width = img.get('width')
                raw_height = img.get('height')
                width = self._extract_number(str(raw_width)) if raw_width else 0
                height = self._extract_number(str(raw_height)) if raw_height else 0

                # Keep only reasonably-sized images with a usable src.
                if (width > 50 or height > 50) and img.has_attr('src'):
                    img_url = img['src']
                    # Skip relative/invalid URLs and obvious non-content images.
                    if img_url.startswith(('http://', 'https://')) and not any(kw in img_url.lower() for kw in ['avatar', 'icon', 'logo']):
                        images.append({
                            'id': f'image_{idx}',
                            'url': img_url,
                            'width': width,
                            'height': height,
                            'alt': img.get('alt', '')
                        })

        except Exception as e:
            self.logger.error(f"从HTML解析图片信息出错: {str(e)}")

        return images

    def _extract_number(self, text: str) -> int:
        """
        Parse the first number in *text*, honouring the compact count
        suffixes used on the site: k (thousand), w/万 (ten thousand),
        m (million). E.g. "1.2k" -> 1200, "3.5w" -> 35000.

        Args:
            text (str): text that may contain a count.

        Returns:
            int: the parsed value, or 0 when no number is found.
        """
        try:
            # Bind the unit to the digits it follows, so unrelated letters
            # elsewhere in the text (e.g. the 'k' in "like") cannot apply a
            # bogus multiplier.
            match = re.search(r'(\d+(?:\.\d+)?)\s*([kKwWmM万])?', text)
            if match:
                value = float(match.group(1))
                unit = (match.group(2) or '').lower()
                multiplier = {'k': 1000, 'w': 10000, '万': 10000, 'm': 1000000}.get(unit, 1)
                return int(value * multiplier)
        except Exception:
            pass

        return 0

    def extract_note_id_from_url(self, url: str) -> Optional[str]:
        """
        Extract the note ID from a URL.

        Args:
            url (str): note URL.

        Returns:
            str | None: the note ID (a hex string of >= 16 chars),
            or None when no ID is found.
        """
        try:
            # Most specific pattern first; the bare-hex pattern is a last
            # resort. The path-prefix alternation is non-capturing so that
            # group(1) is always the ID itself.
            patterns = [
                r'/(?:explore|search_result)/([0-9a-fA-F]{16,})(?:\?|$)',  # /explore/<id> or /search_result/<id>
                r'note/([0-9a-fA-F]{16,})',  # note/<id>
                r'id=([0-9a-fA-F]{16,})',  # id=<id>
                r'([0-9a-fA-F]{16,})'  # any long hex run (fallback)
            ]

            for pattern in patterns:
                match = re.search(pattern, url)
                if match and len(match.group(1)) >= 16:
                    return match.group(1)

            return None
        except Exception as e:
            self.logger.error(f"从URL提取笔记ID出错: {str(e)}")
            return None

    def parse_search_results(self, html_content: str) -> List[str]:
        """
        Parse a search-results page and collect note IDs.

        Args:
            html_content (str): raw HTML of the search-results page.

        Returns:
            list: unique note IDs, in order of first appearance.
        """
        note_ids: List[str] = []

        def add_id(candidate: str) -> None:
            # Append while preserving order and uniqueness.
            if candidate and candidate not in note_ids:
                note_ids.append(candidate)

        try:
            soup = BeautifulSoup(html_content, 'html.parser')

            # Method 1: mine JSON fragments embedded in <script> tags.
            patterns = [
                r'window\.__INITIAL_STATE__\s*=\s*(\{.*?\});',
                r'search\s*:\s*(\{.*?\})',
                r'results\s*:\s*\[(.*?)\]',
                r'notes\s*:\s*\[(.*?)\]',
                r'items\s*:\s*\[(.*?)\]',
                r'\{"id":"([0-9a-fA-F]{16,})"',  # captures a bare hex id
            ]
            for script in soup.find_all('script'):
                script_content = script.string
                if not script_content:
                    continue
                for pattern in patterns:
                    for match in re.findall(pattern, script_content, re.DOTALL):
                        match_content = match[0] if isinstance(match, tuple) else match
                        try:
                            if re.fullmatch(r'[0-9a-fA-F]{16,}', match_content):
                                # The pattern captured the note id itself.
                                add_id(match_content)
                                continue
                            if match_content.startswith(('{', '[')):
                                parsed = json.loads(match_content)
                            else:
                                # The array patterns capture the content
                                # WITHOUT its surrounding brackets; re-wrap
                                # so json.loads can parse it.
                                try:
                                    parsed = json.loads(f'[{match_content}]')
                                except json.JSONDecodeError:
                                    id_match = re.search(r'"id":"([0-9a-fA-F]{16,})"', match_content)
                                    if id_match:
                                        add_id(id_match.group(1))
                                    continue
                            for nid in self._extract_note_ids_from_json(parsed):
                                add_id(nid)
                        except json.JSONDecodeError:
                            continue
                        except Exception as e:
                            self.logger.warning(f"解析JSON数据时出错: {str(e)}")
                            continue

            # Method 2: scan anchors and data-* attributes.
            if not note_ids:
                for a_tag in soup.find_all('a', href=True):
                    add_id(self.extract_note_id_from_url(a_tag['href']) or '')

                for selector in ['div[data-note-id]', 'div[data-id]', '[data-note-id]', '[data-id]']:
                    for element in soup.select(selector):
                        candidate = element.get('data-note-id') or element.get('data-id')
                        if candidate and len(candidate) >= 16:
                            add_id(candidate)

            # Method 3: raw-regex sweep over the whole HTML as a last resort.
            if not note_ids:
                direct_patterns = [
                    r'data-note-id="([0-9a-fA-F]{16,})"',
                    r'data-id="([0-9a-fA-F]{16,})"',
                    r'href="/explore/([0-9a-fA-F]{16,})',
                    r'href="/search_result/([0-9a-fA-F]{16,})',
                    r'note_id=([0-9a-fA-F]{16,})',
                    r'\"id\":\"([0-9a-fA-F]{16,})\"',
                ]
                for pattern in direct_patterns:
                    for match in re.findall(pattern, html_content):
                        if match and len(match) >= 16:
                            add_id(match)

            self.logger.info(f"从搜索结果中提取到 {len(note_ids)} 个笔记ID")
            return note_ids

        except Exception as e:
            self.logger.error(f"解析搜索结果页面出错: {str(e)}")
            return []

    def _extract_note_ids_from_json(self, data: Any) -> List[str]:
        """
        Recursively collect note IDs from a JSON structure.

        Args:
            data: JSON data (dict, list, or scalar).

        Returns:
            list: IDs found (hex strings of >= 16 chars); may contain
            duplicates — callers dedupe.
        """
        note_ids: List[str] = []

        if isinstance(data, dict):
            # Only the first present id-like key is considered, mirroring
            # an id > noteId > note_id precedence.
            for key in ('id', 'noteId', 'note_id'):
                if key in data:
                    candidate = str(data[key])
                    if re.match(r'^[0-9a-fA-F]{16,}$', candidate):
                        note_ids.append(candidate)
                    break

            for value in data.values():
                note_ids.extend(self._extract_note_ids_from_json(value))

        elif isinstance(data, list):
            for item in data:
                note_ids.extend(self._extract_note_ids_from_json(item))

        return note_ids

# Manual test / demo entry point
if __name__ == "__main__":
    # Manual smoke test: exercise URL parsing with a sample note link.
    parser = XiaohongshuParser()

    test_url = (
        "https://www.xiaohongshu.com/explore/68466bed00000000210052ab"
        "?xsec_token=ABUovRXRNJwaMgDWz5M081lXniumlzApIMQEorqv_8Aco=&xsec_source=pc_search"
    )
    note_id = parser.extract_note_id_from_url(test_url)
    print(f"从URL提取的笔记ID: {note_id}")

    # To exercise full-page parsing, point this at a saved HTML file:
    # with open('test_page.html', 'r', encoding='utf-8') as f:
    #     html_content = f.read()
    #     result = parser.parse_note_page(html_content)
    #     print(json.dumps(result, ensure_ascii=False, indent=2))
