import re
import logging
from typing import Dict, List, Optional, Any
from bs4 import BeautifulSoup
import html
import unicodedata

# Configure module-level logging; all parser diagnostics go through this logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('XiaohongshuNoteParser')

class XiaohongshuNoteParser:
    """
    Parser for Xiaohongshu (RED) note detail pages reached from search results.

    Specifically targets detail pages whose URLs look like
    https://www.xiaohongshu.com/explore/68466bed00000000210052ab
    """

    def __init__(self):
        """
        Initialize the note detail parser and pre-compile regex patterns.
        """
        self.soup_parser = 'lxml'  # 'html.parser' can be used as a fallback

        # Pre-compiled regular expression patterns.
        self.patterns = {
            # Extract the note ID from a note URL.
            'note_id': re.compile(r'(?:explore|search_result)/(\w+)'),
            # Extract the embedded JSON state blob.  re.S (DOTALL) is
            # required because the serialized JSON normally spans multiple
            # lines; without it the pattern never matched real pages.
            'json_data': re.compile(r'window\.__INITIAL_STATE__\s*=\s*(\{.*?\});', re.S),
            # Extract image URLs from raw HTML.
            'image_url': re.compile(r'src="([^"]*?)"'),
        }

    def extract_note_id_from_url(self, url: str) -> Optional[str]:
        """
        Extract the note ID from a URL.

        Args:
            url (str): Note URL.

        Returns:
            str: The note ID, or None if extraction fails.
        """
        try:
            match = self.patterns['note_id'].search(url)
            return match.group(1) if match else None
        except Exception as e:
            logger.error(f"提取笔记ID失败: {url}, 错误: {str(e)}")
            return None

    def parse_note_page(self, html_content: str, url: Optional[str] = None) -> Dict[str, Any]:
        """
        Parse a note detail page: note body, user info, images, interaction
        counters and comments.

        Args:
            html_content (str): Raw HTML of the page.
            url (str, optional): Page URL, used to derive the note ID.

        Returns:
            dict: Dictionary with all extracted information.  'success' is
            True when at least one extractor found data; otherwise 'error'
            carries a message.
        """
        try:
            soup = BeautifulSoup(html_content, self.soup_parser)

            # Result skeleton returned to the caller.
            result: Dict[str, Any] = {
                'success': False,
                'error': None,
                'note_info': {
                    'id': self.extract_note_id_from_url(url) if url else '',
                    'title': '',
                    'content': '',
                    'likes_count': 0,
                    'comments_count': 0,
                    'collections_count': 0,
                    'share_count': 0,
                    'publish_time': '',
                },
                'user_info': {
                    'nickname': '',
                    'avatar_url': '',
                    'user_id': '',
                },
                'images': [],
                'videos': [],
                'comments': [],
                'tags': [],
            }

            # Method 1: scrape information from known page locations.
            # The accumulator is OR-ed on the RIGHT so that every extractor
            # runs.  The original `success = success or extract(...)` form
            # short-circuited: after the first extractor succeeded, none of
            # the remaining ones (title, content, images, ...) ever ran.
            success = self._extract_user_info_sidebar(soup, result)
            success = self._extract_note_title(soup, result) or success
            success = self._extract_note_content(soup, result) or success
            success = self._extract_note_images(soup, result) or success
            success = self._extract_interaction_data(soup, result) or success
            success = self._extract_comments(soup, result) or success

            # Method 2: fall back to the embedded JSON state blob.
            if not success:
                json_data = self._extract_json_data(html_content)
                if json_data:
                    try:
                        import json
                        data = json.loads(json_data)
                        success = self._parse_from_json_data(data, result)
                    except Exception as e:
                        logger.warning(f"解析JSON数据时出错: {str(e)}")

            # Final status.
            result['success'] = success
            if not success:
                result['error'] = "无法从页面中提取有效数据"
                logger.error("无法从笔记页面中提取有效数据")

            return result

        except Exception as e:
            logger.error(f"解析笔记页面时出错: {str(e)}")
            return {
                'success': False,
                'error': str(e)
            }

    def _extract_user_info_sidebar(self, soup: "BeautifulSoup", result: Dict) -> bool:
        """
        Extract author information from the sidebar.

        Args:
            soup (BeautifulSoup): Parsed document.
            result (dict): Result dictionary to populate.

        Returns:
            bool: True when a user section was located (its fields may still
            be partially empty).
        """
        try:
            # Locate the user/author section.
            user_section = soup.find('div', class_=re.compile(r'user|author', re.I))
            if not user_section:
                # Fallback: Vue scoped-attribute container seen in sample markup.
                user_section = soup.find('div', {'data-v-a264b01a': True})
                if not user_section:
                    return False

            # Nickname: prefer an explicitly-classed element, otherwise the
            # first non-empty text node in the section.
            nickname_elem = user_section.find('div', class_=re.compile(r'nickname|username', re.I))
            if not nickname_elem:
                nickname_elem = user_section.find(text=re.compile(r'.+'))

            if nickname_elem:
                if hasattr(nickname_elem, 'text'):
                    result['user_info']['nickname'] = self._clean_text(nickname_elem.text)
                else:
                    result['user_info']['nickname'] = self._clean_text(nickname_elem)

            # Avatar: explicitly-classed <img>, else the section's first image.
            avatar_elem = user_section.find('img', class_=re.compile(r'avatar|head', re.I))
            if not avatar_elem:
                all_imgs = user_section.find_all('img')
                if all_imgs:
                    avatar_elem = all_imgs[0]

            if avatar_elem and avatar_elem.get('src'):
                result['user_info']['avatar_url'] = avatar_elem.get('src')

            return True
        except Exception as e:
            logger.error(f"提取用户信息失败: {str(e)}")
            return False

    def _extract_note_title(self, soup: "BeautifulSoup", result: Dict) -> bool:
        """
        Extract the note title, trying several selector strategies.

        Args:
            soup (BeautifulSoup): Parsed document.
            result (dict): Result dictionary to populate.

        Returns:
            bool: True if a title was extracted.
        """
        try:
            # 1. Element whose class mentions "title".
            title_elem = soup.find(class_=re.compile(r'title|note-title', re.I))

            # 2. A plain <h1>.
            if not title_elem:
                title_elem = soup.find('h1')

            # 3. Text nodes containing typical warning/notice phrases.
            if not title_elem:
                title_elem = soup.find(text=re.compile(r'大家千万不要|提醒|警告|注意', re.I))
                if title_elem and hasattr(title_elem, 'parent'):
                    title_elem = title_elem.parent

            # 4. Heuristic: first early <div> with title-sized text.
            if not title_elem:
                all_divs = soup.find_all('div')
                for div in all_divs[:50]:  # only inspect the first 50
                    text = self._clean_text(div.text)
                    if text and 5 <= len(text) <= 50:  # plausible title length
                        title_elem = div
                        break

            if title_elem:
                result['note_info']['title'] = self._clean_text(title_elem.text)
                return True

            return False
        except Exception as e:
            logger.error(f"提取笔记标题失败: {str(e)}")
            return False

    def _extract_note_content(self, soup: "BeautifulSoup", result: Dict) -> bool:
        """
        Extract the note body text, trying several selector strategies.

        Args:
            soup (BeautifulSoup): Parsed document.
            result (dict): Result dictionary to populate.

        Returns:
            bool: True if content was extracted.
        """
        try:
            # 1. Element whose class mentions "content".
            content_elem = soup.find(class_=re.compile(r'content|note-content|detail-content', re.I))

            # 2. Text nodes containing phrases seen in the sample note.
            if not content_elem:
                special_text = soup.find(text=re.compile(r'会被送到急诊|以后医生|不敢给你开药', re.I))
                if special_text and hasattr(special_text, 'parent'):
                    content_elem = special_text.parent

            # 3. Concatenate all non-empty <p> tags.
            if not content_elem:
                p_tags = soup.find_all('p')
                if p_tags:
                    content_text = ' '.join([self._clean_text(p.text) for p in p_tags if self._clean_text(p.text)])
                    if content_text:
                        result['note_info']['content'] = content_text
                        return True

            # 4. Whole main content container as a last resort.
            if not content_elem:
                main_container = soup.find('div', class_=re.compile(r'main-container|content-container', re.I))
                if main_container:
                    content_elem = main_container

            if content_elem:
                result['note_info']['content'] = self._clean_text(content_elem.text)
                return True

            return False
        except Exception as e:
            logger.error(f"提取笔记内容失败: {str(e)}")
            return False

    def _extract_note_images(self, soup: "BeautifulSoup", result: Dict) -> bool:
        """
        Extract the note's images (de-duplicated, at most 9).

        Args:
            soup (BeautifulSoup): Parsed document.
            result (dict): Result dictionary to populate.

        Returns:
            bool: True if at least one image was extracted.
        """
        try:
            def _record(img) -> Optional[Dict[str, Any]]:
                # Build an image record from an <img> tag; None when the tag
                # carries no usable URL.
                img_url = img.get('src', '') or img.get('data-src', '')
                if not img_url:
                    return None
                clean_url = self._clean_image_url(img_url)
                if not clean_url:
                    return None
                return {
                    'url': clean_url,
                    'width': img.get('width', 0),
                    'height': img.get('height', 0),
                    'alt': img.get('alt', '')
                }

            # Locate the image container.
            image_container = soup.find(class_=re.compile(r'image|gallery|swiper', re.I))

            # Fallback: Vue scoped-attribute container.
            if not image_container:
                image_container = soup.find('div', {'data-v-a264b01a': True})

            # Fallback: anchor with "cover mask" classes (per sample markup).
            if not image_container:
                mask_elem = soup.find('a', class_=re.compile(r'cover\s+mask', re.I))
                if mask_elem:
                    image_container = mask_elem

            images = []
            if image_container:
                for img in image_container.find_all('img'):
                    record = _record(img)
                    if record:
                        images.append(record)

            # No container hit: scan every <img> on the page, skipping
            # avatars/icons and keeping only CDN-hosted note images.
            if not images:
                for img in soup.find_all('img'):
                    img_url = img.get('src', '') or img.get('data-src', '')
                    if (img_url
                            and 'avatar' not in img_url.lower()
                            and 'icon' not in img_url.lower()
                            and 'xhscdn.com' in img_url):
                        record = _record(img)
                        if record:
                            images.append(record)

            # De-duplicate by URL, preserving order.
            seen_urls = set()
            unique_images = []
            for img in images:
                if img['url'] not in seen_urls:
                    seen_urls.add(img['url'])
                    unique_images.append(img)

            result['images'] = unique_images[:9]  # cap at 9 images
            return bool(result['images'])

        except Exception as e:
            logger.error(f"提取笔记图片失败: {str(e)}")
            return False

    def _extract_interaction_data(self, soup: "BeautifulSoup", result: Dict) -> bool:
        """
        Extract interaction counters (likes, comments, collections, shares).

        Args:
            soup (BeautifulSoup): Parsed document.
            result (dict): Result dictionary to populate.

        Returns:
            bool: True if at least one counter was found.  (The original
            implementation returned True unconditionally, which wrongly
            marked pages as successfully parsed.)
        """
        try:
            found = False

            # Dedicated interaction bar, when present.
            interaction_elem = soup.find(class_=re.compile(r'interaction|action-bar', re.I))
            if interaction_elem:
                # Like count.
                likes_elem = interaction_elem.find(class_=re.compile(r'like|heart', re.I))
                if likes_elem:
                    likes_text = self._clean_text(likes_elem.text)
                    if likes_text and likes_text.isdigit():
                        result['note_info']['likes_count'] = int(likes_text)
                        found = True

                # Comment count.
                comments_elem = interaction_elem.find(class_=re.compile(r'comment|message', re.I))
                if comments_elem:
                    comments_text = self._clean_text(comments_elem.text)
                    if comments_text and comments_text.isdigit():
                        result['note_info']['comments_count'] = int(comments_text)
                        found = True

            # Fall back to scanning the page text for "<number><label>" pairs.
            # The labels are matched as whole words: the original character
            # classes such as [分享] matched a SINGLE character, so e.g.
            # "5分钟" ("5 minutes") was miscounted as 5 shares.
            page_text = soup.get_text()
            for field, label in (
                ('likes_count', '赞'),
                ('comments_count', '评论'),
                ('collections_count', '收藏'),
                ('share_count', '分享'),
            ):
                match = re.search(r'(\d+)\s*' + label, page_text)
                if match:
                    result['note_info'][field] = int(match.group(1))
                    found = True

            return found
        except Exception as e:
            logger.error(f"提取互动数据失败: {str(e)}")
            return False

    def _extract_comments(self, soup: "BeautifulSoup", result: Dict) -> bool:
        """
        Extract up to 20 comments (user nickname, text, like count).

        Args:
            soup (BeautifulSoup): Parsed document.
            result (dict): Result dictionary to populate.

        Returns:
            bool: True if at least one comment was extracted.
        """
        try:
            # Locate the comments section.
            comments_section = soup.find(class_=re.compile(r'comment|评论', re.I))
            if not comments_section:
                return False

            # Collect candidate comment items.
            comment_items = []
            comment_items.extend(comments_section.find_all(class_=re.compile(r'comment-item|comment-content', re.I)))

            # Heuristic fallback: any <div> with plausible comment text.
            if not comment_items:
                all_divs = comments_section.find_all('div')
                for div in all_divs:
                    text = self._clean_text(div.text)
                    if text and '@' not in text and len(text) > 5:
                        comment_items.append(div)

            comments = []
            for item in comment_items[:20]:  # cap at 20 comments
                user_nickname = ''
                comment_text = ''

                # Nickname element, when present.
                user_elem = item.find(class_=re.compile(r'nickname|username', re.I))
                if user_elem:
                    user_nickname = self._clean_text(user_elem.text)

                # Comment body element, else the item's whole text.
                content_elem = item.find(class_=re.compile(r'content|text', re.I))
                if content_elem:
                    comment_text = self._clean_text(content_elem.text)
                else:
                    comment_text = self._clean_text(item.text)
                    # Try to split "nickname: text" out of the flat string.
                    if ':' in comment_text or '：' in comment_text:
                        parts = re.split(r'[:：]', comment_text, 1)
                        if len(parts) == 2:
                            user_nickname = parts[0].strip()
                            comment_text = parts[1].strip()

                # Per-comment like count.
                likes_count = 0
                like_elem = item.find(class_=re.compile(r'like|赞', re.I))
                if like_elem:
                    like_text = self._clean_text(like_elem.text)
                    if like_text and like_text.isdigit():
                        likes_count = int(like_text)

                if comment_text:
                    comments.append({
                        'user_nickname': user_nickname,
                        'content': comment_text,
                        'likes_count': likes_count
                    })

            result['comments'] = comments
            return bool(comments)
        except Exception as e:
            logger.error(f"提取评论失败: {str(e)}")
            return False

    def _extract_json_data(self, html_content: str) -> Optional[str]:
        """
        Extract the embedded `window.__INITIAL_STATE__` JSON string.

        Args:
            html_content (str): Raw HTML of the page.

        Returns:
            str: The JSON string, or None if not found.
        """
        try:
            match = self.patterns['json_data'].search(html_content)
            return match.group(1) if match else None
        except Exception as e:
            logger.error(f"提取JSON数据失败: {str(e)}")
            return None

    def _parse_from_json_data(self, data: Dict, result: Dict) -> bool:
        """
        Populate the result from the decoded JSON state blob.

        Args:
            data (dict): Decoded JSON data.
            result (dict): Result dictionary to populate.

        Returns:
            bool: True if a known data path was found and parsed.
        """
        try:
            # Candidate locations of the note payload across page versions.
            paths = [
                data.get('noteData'),
                data.get('noteDetailData', {}).get('noteDetails'),
                data.get('state', {}).get('noteDetailData', {}).get('noteDetails'),
                data.get('note', {}).get('noteDetail', {})
            ]

            for path in paths:
                if path and isinstance(path, dict):
                    # Note fields.
                    if 'title' in path:
                        result['note_info']['title'] = path.get('title', '')
                    if 'desc' in path:
                        result['note_info']['content'] = path.get('desc', '')
                    if 'likes' in path:
                        result['note_info']['likes_count'] = path.get('likes', 0)
                    if 'comments' in path:
                        result['note_info']['comments_count'] = path.get('comments', 0)
                    if 'collections' in path:
                        result['note_info']['collections_count'] = path.get('collections', 0)
                    if 'shareCount' in path:
                        result['note_info']['share_count'] = path.get('shareCount', 0)

                    # Author fields.
                    user_data = path.get('user', {})
                    if user_data:
                        result['user_info']['nickname'] = user_data.get('nickname', '')
                        result['user_info']['avatar_url'] = user_data.get('avatar', '')
                        result['user_info']['user_id'] = user_data.get('id', '')

                    # Images.
                    images = path.get('imageList', [])
                    for img in images:
                        if isinstance(img, dict) and 'url' in img:
                            result['images'].append({
                                'url': img.get('url', ''),
                                'width': img.get('width', 0),
                                'height': img.get('height', 0)
                            })

                    return True

            return False
        except Exception as e:
            logger.error(f"从JSON数据解析信息失败: {str(e)}")
            return False

    def _clean_text(self, text: str) -> str:
        """
        Normalize text: HTML-unescape, collapse whitespace, NFKC-normalize.

        Args:
            text (str): Raw text.

        Returns:
            str: Cleaned text ('' for falsy input).
        """
        if not text:
            return ''

        # Decode HTML entities.
        text = html.unescape(text)

        # Collapse runs of whitespace/newlines into single spaces.
        text = ' '.join(text.split())

        # Normalize Unicode (full-width -> half-width etc.).
        text = unicodedata.normalize('NFKC', text)

        return text.strip()

    def _clean_image_url(self, url: str) -> str:
        """
        Clean an image URL: drop query parameters and fix scheme-relative URLs.

        Args:
            url (str): Raw URL.

        Returns:
            str: Cleaned URL (relative paths are returned unchanged).
        """
        # Drop query parameters (size limits etc.).
        if '?' in url:
            url = url.split('?')[0]

        # Complete scheme-relative URLs.
        if url.startswith('//'):
            url = 'https:' + url
        elif not url.startswith(('http://', 'https://')):
            # Relative path: base URL unknown here, leave as-is.
            pass

        return url.strip()

# Demo entry point: smoke-test the parser without any HTML input.
if __name__ == "__main__":
    note_parser = XiaohongshuNoteParser()
    print("XiaohongshuNoteParser已初始化，可以用于解析小红书笔记详情页面")

    # Example: derive the note ID from a search-result URL.
    demo_url = (
        "https://www.xiaohongshu.com/explore/68466bed00000000210052ab"
        "?xsec_token=ABUovRXRNJwaMgDWz5M081lXniumlzApIMQEorqv_8Aco=&xsec_source=pc_search"
    )
    extracted_id = note_parser.extract_note_id_from_url(demo_url)
    print(f"从URL提取的笔记ID: {extracted_id}")
    print("需要提供HTML内容才能进行完整解析")