from bs4 import BeautifulSoup
import re
import json
from datetime import datetime

class DataExtractor:
    """Extract structured data from Xiaohongshu (Little Red Book) note pages.

    Every public ``extract_*`` method accepts the raw page HTML as a string
    and is defensive: parsing failures are logged via ``print`` and a safe
    default (``None``, ``{}``, ``[]`` or zeroed stats) is returned instead of
    raising, so one broken page never aborts a crawl.
    """

    def __init__(self):
        # CSS selector configuration, keyed by the piece of data it locates.
        self.selectors = {
            # Basic note fields
            'note_content': '.note-content',
            'author_name': 'span.username',
            'publish_time': 'div.time',
            'note_title': '.title',
            
            # Note images (slider images or lazy-loaded xhs images)
            'note_images': 'img.note-slider-img, img[data-xhs-img]',
            
            # Engagement counters
            'likes_count': '.like-count',
            'comments_count': '.comment-count',
            'collects_count': '.collect-count',
            
            # Comment list and its sub-fields
            'comments': '.comment-item',
            'comment_user': '.comment-user',
            'comment_content': '.comment-content',
            'comment_time': '.comment-time',
            'comment_likes': '.comment-like-count',
            
            # Author/user widgets
            'user_avatar': '.avatar-img',
            'follow_button': '.follow-btn',
        }
    
    def extract_note_info(self, html):
        """Extract the full structured record for one note page.

        Args:
            html (str): raw page HTML.

        Returns:
            dict | None: note record with title, content, author, stats,
            images, comments and a ``crawled_at`` ISO timestamp, or ``None``
            when extraction fails entirely.
        """
        try:
            soup = BeautifulSoup(html, 'html.parser')
            # note_id and url are placeholders for the caller to fill in.
            note_data = {
                'note_id': None,
                'title': '',
                'content': '',
                'author': {},
                'publish_time': '',
                'stats': {'likes': 0, 'comments': 0, 'collects': 0},
                'images': [],
                'comments': [],
                'url': '',
                'crawled_at': datetime.now().isoformat(),
            }
            
            # Simple text fields come straight from their selectors.
            title_elem = soup.select_one(self.selectors['note_title'])
            if title_elem:
                note_data['title'] = title_elem.get_text().strip()
            
            content_elem = soup.select_one(self.selectors['note_content'])
            if content_elem:
                note_data['content'] = content_elem.get_text().strip()
            
            time_elem = soup.select_one(self.selectors['publish_time'])
            if time_elem:
                note_data['publish_time'] = time_elem.get_text().strip()
            
            # Composite sections are delegated to their own extractors,
            # each of which has its own error handling and JSON fallback.
            note_data['author'] = self.extract_author_info(html)
            note_data['stats'] = self.extract_stats(html)
            note_data['images'] = self.extract_images(html)
            note_data['comments'] = self.extract_comments(html)
            
            return note_data
        except Exception as e:
            print(f"提取笔记信息出错: {e}")
            return None
    
    def extract_author_info(self, html):
        """Extract the note author's name, id and avatar URL.

        Args:
            html (str): raw page HTML.

        Returns:
            dict: ``{'name', 'id', 'avatar'}`` with ``''`` for missing fields.
        """
        try:
            soup = BeautifulSoup(html, 'html.parser')
            author_info = {'name': '', 'id': '', 'avatar': ''}
            
            name_elem = soup.select_one(self.selectors['author_name'])
            if name_elem:
                author_info['name'] = name_elem.get_text().strip()
            
            avatar_elem = soup.select_one(self.selectors['user_avatar'])
            if avatar_elem and avatar_elem.has_attr('src'):
                author_info['avatar'] = avatar_elem['src']
            
            # The user id usually only appears in the embedded JS state,
            # not in the rendered DOM, so grab it with a regex.
            author_id_match = re.search(r'"user_id":"([^"]+)"', html)
            if author_id_match:
                author_info['id'] = author_id_match.group(1)
            
            return author_info
        except Exception as e:
            print(f"提取作者信息出错: {e}")
            return {'name': '', 'id': '', 'avatar': ''}
    
    def extract_images(self, html):
        """Extract image URLs (and alt text) from the note.

        Args:
            html (str): raw page HTML.

        Returns:
            list: ``[{'src': url, 'alt': text}, ...]``; empty on failure.
        """
        try:
            soup = BeautifulSoup(html, 'html.parser')
            images = []
            
            for img in soup.select(self.selectors['note_images']):
                # Prefer 'src'; fall back to the lazy-load 'data-src'.
                # Using `or` also falls through on an *empty* src attribute,
                # which the old attribute-presence check missed.
                img_url = img.get('src') or img.get('data-src') or ''
                if img_url:
                    images.append({
                        'src': img_url,
                        'alt': img.get('alt', '').strip(),
                    })
            
            # If the DOM yielded nothing, try the embedded JSON state.
            if not images:
                images = self._extract_images_from_json(html)
            
            return images
        except Exception as e:
            print(f"提取图片信息出错: {e}")
            return []
    
    def extract_comments(self, html):
        """Extract the visible comment list.

        Args:
            html (str): raw page HTML.

        Returns:
            list: ``[{'user', 'content', 'time', 'likes'}, ...]``.
        """
        try:
            soup = BeautifulSoup(html, 'html.parser')
            comments = []
            
            for comment_elem in soup.select(self.selectors['comments']):
                user_elem = comment_elem.select_one(self.selectors['comment_user'])
                user_name = user_elem.get_text().strip() if user_elem else ''
                
                content_elem = comment_elem.select_one(self.selectors['comment_content'])
                content = content_elem.get_text().strip() if content_elem else ''
                
                time_elem = comment_elem.select_one(self.selectors['comment_time'])
                comment_time = time_elem.get_text().strip() if time_elem else ''
                
                # Like counts can be abbreviated ('1.2万'), so reuse the shared
                # parser for consistency with extract_stats (the old inline
                # int() silently dropped such values to 0).
                likes_elem = comment_elem.select_one(self.selectors['comment_likes'])
                likes = self._parse_count(likes_elem.get_text().strip()) if likes_elem else 0
                
                # Skip comments without text (e.g. sticker-only comments).
                if content:
                    comments.append({
                        'user': user_name,
                        'content': content,
                        'time': comment_time,
                        'likes': likes,
                    })
            
            # If the DOM yielded nothing, try the embedded JSON state.
            if not comments:
                comments = self._extract_comments_from_json(html)
            
            return comments
        except Exception as e:
            print(f"提取评论信息出错: {e}")
            return []
    
    def extract_stats(self, html):
        """Extract like/comment/collect counters.

        Args:
            html (str): raw page HTML.

        Returns:
            dict: ``{'likes': int, 'comments': int, 'collects': int}``.
        """
        try:
            soup = BeautifulSoup(html, 'html.parser')
            stats = {'likes': 0, 'comments': 0, 'collects': 0}
            
            # Each counter maps a stats key to its selector; counts may be
            # abbreviated ('1.2万'), so they all go through _parse_count.
            for key, selector_key in (('likes', 'likes_count'),
                                      ('comments', 'comments_count'),
                                      ('collects', 'collects_count')):
                elem = soup.select_one(self.selectors[selector_key])
                if elem:
                    stats[key] = self._parse_count(elem.get_text().strip())
            
            # If the DOM yielded nothing at all, try the embedded JSON state.
            if not any(stats.values()):
                json_stats = self._extract_stats_from_json(html)
                if json_stats:
                    stats.update(json_stats)
            
            return stats
        except Exception as e:
            print(f"提取统计数据出错: {e}")
            return {'likes': 0, 'comments': 0, 'collects': 0}
    
    def _parse_count(self, count_text):
        """Parse a human-readable count into an integer.

        Handles thousands separators and unit suffixes: 万 (or latin 'w')
        = 10**4, 千 (or latin 'k') = 10**3.

        Args:
            count_text (str): count text, e.g. ``'1,234'``, ``'1.2万'``, ``'3千'``.

        Returns:
            int: parsed value, or 0 when nothing numeric is found.
        """
        try:
            count_text = count_text.replace(',', '').strip()
            num = float(re.search(r'([\d.]+)', count_text).group(1))
            if '万' in count_text or 'w' in count_text.lower():
                return int(num * 10000)
            if '千' in count_text or 'k' in count_text.lower():
                return int(num * 1000)
            # int(float(...)) so plain decimals like '1.5' parse instead of
            # raising (the previous int('1.5') silently yielded 0).
            return int(num)
        except (AttributeError, ValueError, TypeError):
            # No digits found (regex match is None) or unparsable text.
            return 0
    
    def _extract_images_from_json(self, html):
        """Best-effort image extraction from JSON embedded in the page.

        Args:
            html (str): raw page HTML.

        Returns:
            list: ``[{'src', 'alt'}, ...]``; empty when no JSON matched.
        """
        try:
            # Format 1: a top-level "images":[...] array.
            json_match = re.search(r'"images":\[([^\]]+)\]', html)
            if json_match:
                # Unescape quotes so the fragment parses as JSON; this is a
                # heuristic and any failure falls through to the except below.
                images_data = f"[{json_match.group(1)}]".replace('\\"', '"')
                images = json.loads(images_data)
                return [
                    {'src': img.get('url', '') if isinstance(img, dict) else img,
                     'alt': ''}
                    for img in images
                ]
            
            # Format 2: "content":{"media":[...]}.
            json_match = re.search(r'"content":\{"media":\[([^\]]+)\]\}', html)
            if json_match:
                media_data = f"[{json_match.group(1)}]".replace('\\"', '"')
                media = json.loads(media_data)
                return [
                    {'src': m.get('url', ''), 'alt': m.get('alt', '')}
                    for m in media if isinstance(m, dict)
                ]
            
            return []
        except Exception:
            # Malformed embedded JSON is expected on some pages; treat as none.
            return []
    
    def _extract_comments_from_json(self, html):
        """Best-effort comment extraction from JSON embedded in the page.

        Args:
            html (str): raw page HTML.

        Returns:
            list: up to 10 ``{'user', 'content', 'time', 'likes'}`` rows.
        """
        try:
            json_match = re.search(r'"comments":\[([^\]]+)\]', html)
            if not json_match:
                return []
            
            comments_data = f"[{json_match.group(1)}]".replace('\\"', '"')
            parsed = json.loads(comments_data)
            
            results = []
            for c in parsed[:10]:  # cap the fallback at 10 comments
                if isinstance(c, dict):
                    results.append({
                        'user': c.get('username', ''),
                        'content': c.get('content', ''),
                        'time': c.get('time', ''),
                        'likes': int(c.get('likes', 0)),
                    })
                else:
                    # Non-dict entries carry no usable fields.
                    results.append({'user': '', 'content': '', 'time': '', 'likes': 0})
            return results
        except Exception:
            return []
    
    def _extract_stats_from_json(self, html):
        """Best-effort counter extraction from JSON embedded in the page.

        Args:
            html (str): raw page HTML.

        Returns:
            dict | None: any of ``likes``/``comments``/``collects`` found,
            or ``None`` when none were present.
        """
        try:
            stats = {}
            for key in ('likes', 'comments', 'collects'):
                match = re.search(rf'"{key}":(\d+)', html)
                if match:
                    stats[key] = int(match.group(1))
            return stats or None
        except Exception:
            return None

# Manual smoke test
if __name__ == "__main__":
    # Constructing the extractor is enough to verify the selector config loads.
    extractor = DataExtractor()
    
    # To try it against a saved page, uncomment:
    #   with open('test_page.html', 'r', encoding='utf-8') as f:
    #       html = f.read()
    #       note_info = extractor.extract_note_info(html)
    #       print(json.dumps(note_info, ensure_ascii=False, indent=2))
    
    print("数据提取器初始化完成，可以用于提取小红书笔记数据")