#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Xiaohongshu (Little Red Book) note-information API - optimized version.

Provides a single, concise method call for fetching note data, with
built-in error handling.
"""

import requests
import json
import re
import time
import logging
from datetime import datetime, timedelta
from urllib.parse import urlparse, parse_qs

# Configure module-level logging (INFO and above).
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class XiaohongshuAPI:
    def __init__(self):
        """Create the shared HTTP session and install the default headers."""
        session = requests.Session()
        self.session = session
        self.setup_session()
    
    def setup_session(self):
        """配置会话基础头部"""
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none'
        })
    
    def get_note_info(self, url, cookie_string=None):
        """
        Fetch Xiaohongshu note information (main entry point).

        Tries three strategies in order - the web feed API, full HTML
        parsing, then a stripped-down backup request - merging their
        results until every field is filled or all strategies run out.

        Args:
            url (str): Link to the Xiaohongshu note.
            cookie_string (str): Cookie header string (required; copy it
                from the browser's developer tools).

        Returns:
            dict: Note fields plus a "错误信息" (error message) entry,
                which is empty on full success.
        """
        logger.info(f"开始处理URL: {url}")

        # Result skeleton; keys are the Chinese field names expected by
        # downstream consumers.
        result = {
            "笔记链接": url,
            "笔记id": "",
            "用户名": "",
            "笔记标题": "",
            "笔记正文": "",
            "笔记标签": [],
            "发布时间": "",
            "点赞数": "",
            "收藏数": "",
            "评论数": "",
            "分享数": "",
            "形式": "",
            "数据获取时间": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            "错误信息": ""
        }

        # Fields still empty after the most recent successful strategy.
        missing_fields = []

        try:
            # Extract the note id from the URL.
            note_id = self._extract_note_id(url)
            if not note_id:
                logger.error(f"无法从URL中提取笔记ID: {url}")
                result["错误信息"] = "无法从URL中提取笔记ID"
                return result

            result["笔记id"] = note_id
            logger.info(f"提取到笔记ID: {note_id}")

            # A cookie is mandatory: anonymous requests only receive the
            # generic landing page.
            if not cookie_string:
                logger.error("Cookie是必需的，请提供Cookie")
                result["错误信息"] = "Cookie是必需的，请从浏览器开发者工具中复制Cookie值"
                return result

            self._set_cookies(cookie_string)
            logger.info("使用用户提供的Cookie")

            # Strategy 1: the web feed API.
            logger.info("尝试API方法获取数据...")
            api_result = self._try_api_method(url, note_id)
            if api_result and not api_result.get("错误信息"):
                logger.info("API方法成功获取数据")
                result.update(api_result)
                missing_fields = self._check_missing_fields(result)
                if not missing_fields:
                    logger.info("API方法获取完整数据，返回结果")
                    return result
                logger.info(f"API方法缺少字段: {missing_fields}")
            else:
                # FIX: guard against a falsy helper result before calling
                # .get(), which previously could raise AttributeError and
                # mask the real failure.
                logger.warning(f"API方法失败: {(api_result or {}).get('错误信息', '未知错误')}")

            # Strategy 2: parse the note page HTML.
            logger.info("尝试HTML解析方法...")
            html_result = self._try_html_method(url, note_id)
            if html_result and not html_result.get("错误信息"):
                logger.info("HTML解析方法成功获取数据")
                # Merge, keeping any non-empty value already present.
                for key, value in html_result.items():
                    if value and not result.get(key):
                        result[key] = value

                missing_fields = self._check_missing_fields(result)
                if not missing_fields:
                    logger.info("HTML解析方法获取完整数据，返回结果")
                    return result
                logger.info(f"HTML解析方法仍缺少字段: {missing_fields}")
            else:
                # FIX: same falsy-result guard as above.
                logger.warning(f"HTML解析方法失败: {(html_result or {}).get('错误信息', '未知错误')}")

            # Strategy 3: backup fetch with minimal headers.
            logger.info("尝试备用解析方法...")
            backup_result = self._try_backup_method(url, note_id)
            if backup_result and not backup_result.get("错误信息"):
                logger.info("备用解析方法成功获取数据")
                # Merge, keeping any non-empty value already present.
                for key, value in backup_result.items():
                    if value and not result.get(key):
                        result[key] = value

                missing_fields = self._check_missing_fields(result)
            else:
                # FIX: same falsy-result guard as above.
                logger.warning(f"备用解析方法失败: {(backup_result or {}).get('错误信息', '未知错误')}")

            # Summarize: report the still-missing fields, or total failure
            # when the title is absent / a known placeholder page title.
            if missing_fields:
                result["错误信息"] = f"部分字段未获取到: {', '.join(missing_fields)}"
                logger.warning(f"最终结果缺少字段: {missing_fields}")
            elif not result.get("笔记标题") or result.get("笔记标题") in ["小红书", "小红书 - 标记我的生活"]:
                result["错误信息"] = "所有获取方法都失败"
                logger.error("所有获取方法都失败")
            else:
                logger.info("成功获取笔记信息")

            return result

        except Exception as e:
            logger.error(f"获取过程中发生异常: {str(e)}")
            result["错误信息"] = f"获取过程中发生异常: {str(e)}"
            return result
    
    def _extract_note_id(self, url):
        """从URL中提取笔记ID"""
        try:
            if '/explore/' in url:
                return url.split('/explore/')[-1].split('?')[0]
            return None
        except:
            return None
    
    def _set_cookies(self, cookie_string):
        """设置Cookie"""
        try:
            self.session.cookies.clear()
            for item in cookie_string.split(';'):
                if '=' in item:
                    key, value = item.strip().split('=', 1)
                    self.session.cookies.set(key, value)
        except:
            pass
    
    def _try_api_method(self, url, note_id):
        """Attempt to fetch note data through the web feed API endpoint."""
        try:
            # The xsec_token query parameter must be forwarded to the API.
            token = parse_qs(urlparse(url).query).get('xsec_token', [''])[0]

            payload = {
                "source_note_id": note_id,
                "image_formats": ["jpg", "webp", "avif"],
                "extra": {"need_body_topic": "1"},
                "xsec_source": "pc_wind_export",
                "xsec_token": token,
            }
            extra_headers = {
                'Content-Type': 'application/json;charset=UTF-8',
                'Origin': 'https://www.xiaohongshu.com',
                'Referer': 'https://www.xiaohongshu.com/',
            }

            resp = self.session.post(
                "https://edith.xiaohongshu.com/api/sns/web/v1/feed",
                json=payload,
                headers=extra_headers,
                timeout=10,
            )

            if resp.status_code == 200:
                body = resp.json()
                if body.get('success') and body.get('data'):
                    return self._parse_api_data(body['data'], url, note_id)

            return {"错误信息": f"API请求失败: {resp.status_code}"}

        except Exception as e:
            return {"错误信息": f"API方法异常: {str(e)}"}
    
    def _try_html_method(self, url, note_id):
        """Fetch the note page and delegate to the full HTML parser."""
        try:
            resp = self.session.get(url, timeout=15)
            if resp.status_code == 200:
                return self._parse_html_content(resp.text, url, note_id)
            return {"错误信息": f"页面请求失败: {resp.status_code}"}
        except Exception as e:
            return {"错误信息": f"HTML方法异常: {str(e)}"}
    
    def _try_backup_method(self, url, note_id):
        """Last-resort fetch with a plain requests call and minimal headers."""
        try:
            # Some pages respond better to a stripped-down header set.
            minimal_headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
                'Accept': '*/*',
            }

            resp = requests.get(url, headers=minimal_headers, timeout=10)
            if resp.status_code != 200:
                return {"错误信息": f"备用方法请求失败: {resp.status_code}"}
            return self._parse_simple_html(resp.text, url, note_id)

        except Exception as e:
            return {"错误信息": f"备用方法异常: {str(e)}"}
    
    def _parse_api_data(self, data, url, note_id):
        """Map the feed-API response payload onto the result field names."""
        try:
            items = data.get('items', [])
            if not items:
                return {"错误信息": "API返回数据为空"}

            note_card = items[0].get('note_card', {})
            user_info = note_card.get('user', {})
            interact_info = note_card.get('interact_info', {})

            result = {
                "用户名": user_info.get('nickname', ''),
                "笔记标题": note_card.get('title', ''),
                "笔记正文": note_card.get('desc', ''),
                "发布时间": self._convert_timestamp(note_card.get('time', 0)),
                "笔记标签": self._extract_tags(note_card),
                # Interaction counters keep Xiaohongshu's display format.
                "点赞数": self._safe_get_count(interact_info.get('liked_count', 0)),
                "收藏数": self._safe_get_count(interact_info.get('collected_count', 0)),
                "评论数": self._safe_get_count(interact_info.get('comment_count', 0)),
                "分享数": self._safe_get_count(interact_info.get('share_count', 0)),
                # Content form (image / video).
                "形式": self._detect_content_type_api(note_card),
            }

            logger.info(f"API解析结果 - 标题: {result.get('笔记标题', '未获取')}, 用户: {result.get('用户名', '未获取')}, 形式: {result.get('形式', '未知')}")
            return result

        except Exception as e:
            return {"错误信息": f"API数据解析异常: {str(e)}"}
    
    def _parse_html_content(self, html_content, url, note_id):
        """Parse a note page's HTML into result fields.

        Runs several extraction strategies per field (embedded JSON
        fragments, <title>/<meta> tags, regex heuristics), then lets the
        page's embedded JSON state override everything at the end, since
        it is the most reliable source when present.
        """
        try:
            result = {}
            
            # Improved title extraction - multiple strategies.
            title_found = False
            
            # Strategy 1: extract the title from embedded JSON, using
            # structure-anchored patterns first so the page-level (site)
            # title is not picked up by accident.
            specific_patterns = [
                r'"noteCard"\s*:\s*\{[^}]*?"title"\s*:\s*"([^"]{5,100})"',
                r'"note"\s*:\s*\{[^}]*?"title"\s*:\s*"([^"]{5,100})"',
                r'"feedDetail"\s*:\s*\{[^}]*?"title"\s*:\s*"([^"]{5,100})"'
            ]
            
            for pattern in specific_patterns:
                matches = re.findall(pattern, html_content, re.DOTALL)
                if matches:
                    for title in matches:
                        # Filter out invalid and default (site boilerplate) titles.
                        invalid_keywords = [
                            '小红书', '标记我的生活', '3 亿人的生活经验', '沪ICP备', '备案', 
                            '网上有害信息举报', '举报专区', '官网', '帮助中心', '首页'
                        ]
                        
                        # Keep the first candidate free of any blacklist keyword.
                        if title and not any(keyword in title for keyword in invalid_keywords):
                            result["笔记标题"] = title.strip()
                            title_found = True
                            logger.info(f"从特定结构中提取到标题: {title}")
                            break
                if title_found:
                    break
            
            # If nothing matched above, fall back to a generic pattern with
            # stricter filtering.
            if not title_found:
                general_patterns = [
                    r'"title"\s*:\s*"([^"]{10,100})"'  # restrict length to 10-100 characters
                ]
                
                for pattern in general_patterns:
                    matches = re.findall(pattern, html_content)
                    if matches:
                        # Apply stricter screening to the generic matches.
                        valid_titles = []
                        for title in matches:
                            invalid_keywords = [
                                '小红书', '标记我的生活', '3 亿人的生活经验', '沪ICP备', '备案', 
                                '网上有害信息举报', '举报专区', '官网', '帮助中心', '首页', '关于我们',
                                '用户协议', '隐私政策', '社区公约'
                            ]
                            
                            # Prefer titles containing content-related words
                            # (more likely to be the actual note title).
                            # NOTE(review): this keyword list is hard-coded to
                            # one product domain (lingerie) - confirm it
                            # generalizes to other note topics.
                            content_keywords = ['内衣', '胸', '身材', '穿搭', '显大', '聚拢', '小胸', 'bra']
                            has_content_word = any(keyword in title for keyword in content_keywords)
                            has_invalid_word = any(keyword in title for keyword in invalid_keywords)
                            
                            if not has_invalid_word and (has_content_word or len(title) > 15):
                                valid_titles.append(title)
                        
                        # Pick the longest candidate (usually the real content title).
                        if valid_titles:
                            best_title = max(valid_titles, key=len)
                            result["笔记标题"] = best_title.strip()
                            title_found = True
                            logger.info(f"从通用模式中提取到标题: {best_title}")
                            break
            
            # Strategy 2: the <title> tag (excluding default site titles).
            if not title_found:
                title_match = re.search(r'<title[^>]*>([^<]+)</title>', html_content)
                if title_match:
                    title = title_match.group(1).strip()
                    # Strip site suffixes, then check whether a valid title remains.
                    title = title.replace(' - 小红书', '').replace(' - 小红书 - 标记我的生活', '')
                    title = title.replace('_沪ICP备', '').replace(' - 沪ICP备', '')
                    
                    invalid_titles = [
                        '小红书', '小红书 - 标记我的生活', '3 亿人的生活经验，都在小红书',
                        '小红书_沪ICP备', '小红书 - 沪ICP备', '小红书，标记我的生活'
                    ]
                    
                    if title and not any(invalid in title for invalid in invalid_titles):
                        if not re.search(r'沪ICP备|备案|小红书官网', title) and len(title) > 5:
                            result["笔记标题"] = title
                            title_found = True
                            logger.info(f"title标签中提取到有效标题: {title}")
            
            # Strategy 3: the meta description tag.
            if not title_found:
                desc_match = re.search(r'<meta[^>]*name="description"[^>]*content="([^"]+)"[^>]*>', html_content)
                if desc_match:
                    desc = desc_match.group(1).strip()
                    invalid_descriptions = [
                        '3 亿人的生活经验，都在小红书', '小红书，标记我的生活', 
                        '网上有害信息举报', '小红书官网'
                    ]
                    
                    if desc and not any(invalid in desc for invalid in invalid_descriptions):
                        # Long descriptions are truncated to serve as the title.
                        if len(desc) > 50:
                            result["笔记标题"] = desc[:50] + "..."
                        else:
                            result["笔记标题"] = desc
                        title_found = True
                        logger.info(f"meta description中提取到标题: {desc[:50]}")
            
            # Strategy 4: infer the title from the body text.
            # NOTE(review): "笔记正文" is only populated further down in this
            # method, so this branch can never fire in the current statement
            # order - confirm whether it was meant to run after body extraction.
            if not title_found and result.get("笔记正文"):
                content = result["笔记正文"]
                # Use the first line of the body as the title, when reasonable.
                first_sentence = content.split('\n')[0].strip()
                if len(first_sentence) > 10 and len(first_sentence) < 100:
                    # Skip conversational openings that make poor titles.
                    if not any(start in first_sentence for start in ['真的', '有没有', '谁懂', '姐妹们']):
                        result["笔记标题"] = first_sentence
                        title_found = True
                        logger.info(f"从正文推断标题: {first_sentence}")
            
            # Improved username extraction.
            username_found = False
            
            # Strategy 1: pull the username from embedded JSON.
            username_patterns = [
                r'"nickname"\s*:\s*"([^"]+)"',
                r'"user"\s*:\s*\{[^}]*"nickname"\s*:\s*"([^"]+)"',
                r'"author"\s*:\s*"([^"]+)"'
            ]
            
            for pattern in username_patterns:
                matches = re.findall(pattern, html_content)
                if matches:
                    for username in matches:
                        # Skip placeholder / site-chrome names.
                        if username and username not in ['小红书', 'xiaohongshu', '关注', '未命名', '', 'null']:
                            result["用户名"] = username.strip()
                            username_found = True
                            logger.info(f"提取到用户名: {username}")
                            break
                if username_found:
                    break
            
            # Improved body-text extraction.
            content_found = False
            
            # Strategy 1: pull the body text from embedded JSON.
            content_patterns = [
                r'"desc"\s*:\s*"([^"]+)"',
                r'"content"\s*:\s*"([^"]+)"',
                r'"noteCard"\s*:.*?"desc"\s*:\s*"([^"]+)"'
            ]
            
            for pattern in content_patterns:
                matches = re.findall(pattern, html_content, re.DOTALL)
                if matches:
                    for content in matches:
                        # Reject the site's default marketing blurbs.
                        if content and content not in ['3 亿人的生活经验，都在小红书', '小红书，标记我的生活', '']:
                            result["笔记正文"] = content.strip()
                            content_found = True
                            logger.info(f"提取到正文: {content[:50]}...")
                            break
                if content_found:
                    break
            
            # Strategy 2: if the body is still empty, fall back to the meta
            # description (unless it is the default site blurb).
            if not content_found:
                desc_match = re.search(r'<meta[^>]*name="description"[^>]*content="([^"]+)"[^>]*>', html_content)
                if desc_match:
                    desc = desc_match.group(1).strip()
                    if desc and desc not in ['3 亿人的生活经验，都在小红书', '小红书，标记我的生活']:
                        result["笔记正文"] = desc
                        logger.info(f"使用meta description作为正文: {desc[:50]}...")
            
            # Improved publish-time extraction.
            time_patterns = [
                r'"time"\s*:\s*(\d{10,13})',  # unix timestamp (s or ms)
                r'"publishTime"\s*:\s*(\d{10,13})',
                r'"createTime"\s*:\s*(\d{10,13})',
                r'data-time="([^"]+)"',
                r'(\d{4}-\d{2}-\d{2}[\s]\d{2}:\d{2}:\d{2})',  # standard datetime format
                r'(\d{4}-\d{2}-\d{2})',  # date-only format
                r'(\d{1,2})-(\d{1,2})',  # month-day format (e.g. 01-22)
                r'(\d{1,2})天前',
                r'(\d{1,2})小时前',
                r'(\d{1,2})分钟前'
            ]
            
            for pattern in time_patterns:
                match = re.search(pattern, html_content)
                if match:
                    # Every pattern above has >= 1 group, so lastindex is never None here.
                    time_str = match.group(1) if match.lastindex >= 1 else match.group(0)
                    
                    # Handle the different time formats.
                    if time_str.isdigit() and len(time_str) >= 10:
                        # Unix timestamp.
                        result["发布时间"] = self._convert_timestamp(int(time_str))
                    elif re.match(r'\d{4}-\d{2}-\d{2}', time_str):
                        # Already in standard format.
                        result["发布时间"] = time_str
                    elif re.match(r'\d{1,2}-\d{1,2}', time_str):
                        # Month-day format: prefix the current year.
                        # NOTE(review): for the (\d{1,2})-(\d{1,2}) pattern,
                        # group(1) captures only the month, so time_str would
                        # be e.g. "01" and this branch would not match -
                        # confirm the intended capture group.
                        current_year = datetime.now().year
                        result["发布时间"] = f"{current_year}-{time_str.zfill(5)}"
                    elif '天前' in time_str:
                        # Relative time ("N days ago").
                        days = int(re.search(r'\d+', time_str).group())
                        pub_date = datetime.now() - timedelta(days=days)
                        result["发布时间"] = pub_date.strftime('%Y-%m-%d')
                    
                    if result.get("发布时间"):
                        break
            
            # Enhanced interaction-count extraction - multiple strategies.
            # Strategy 1: og:xhs meta tags.
            meta_patterns = {
                r'<meta[^>]*name="og:xhs:note_like"[^>]*content="(\d+)"[^>]*>': "点赞数",
                r'<meta[^>]*name="og:xhs:note_collect"[^>]*content="(\d+)"[^>]*>': "收藏数",
                r'<meta[^>]*name="og:xhs:note_comment"[^>]*content="(\d+)"[^>]*>': "评论数",
                r'<meta[^>]*name="og:xhs:note_share"[^>]*content="(\d+)"[^>]*>': "分享数"
            }
            
            for pattern, field_name in meta_patterns.items():
                match = re.search(pattern, html_content, re.IGNORECASE)
                if match:
                    result[field_name] = match.group(1)
            
            # Strategy 2: interaction counts embedded in JSON (both camelCase
            # and snake_case spellings occur in the page data).
            json_patterns = {
                r'"likedCount"\s*:\s*"?(\d+)"?': "点赞数",
                r'"liked_count"\s*:\s*"?(\d+)"?': "点赞数",
                r'"collectedCount"\s*:\s*"?(\d+)"?': "收藏数",
                r'"collected_count"\s*:\s*"?(\d+)"?': "收藏数",
                r'"commentCount"\s*:\s*"?(\d+)"?': "评论数",
                r'"comment_count"\s*:\s*"?(\d+)"?': "评论数",
                r'"shareCount"\s*:\s*"?(\d+)"?': "分享数",
                r'"share_count"\s*:\s*"?(\d+)"?': "分享数"
            }
            
            for pattern, field_name in json_patterns.items():
                if not result.get(field_name):  # only try when still unset
                    matches = re.findall(pattern, html_content, re.IGNORECASE)
                    if matches:
                        # Take the maximum: several matches may occur and the
                        # largest is usually the accurate one.
                        max_value = max(int(m) for m in matches)
                        if max_value > 0:
                            result[field_name] = str(max_value)
            
            # Extract hashtags from the body text and normalize them.
            if result.get("笔记正文"):
                hashtags = re.findall(r'#([^#\s]+)', result["笔记正文"])
                # Normalize tag formatting.
                cleaned_tags = []
                for tag in hashtags:
                    # Strip the "[话题]" suffix and stray BOM characters.
                    cleaned_tag = tag.replace('[话题]', '').replace('﻿', '').strip()
                    if cleaned_tag and cleaned_tag not in cleaned_tags:
                        cleaned_tags.append(cleaned_tag)
                result["笔记标签"] = cleaned_tags
            
            # Detect the content form (image / video).
            result["形式"] = self._detect_content_type_html(html_content)
            
            # Finally, try the page's embedded JSON state - it is more
            # accurate than regex scraping, so it takes precedence.
            json_data = self._extract_json_data(html_content)
            if json_data:
                json_result = self._parse_json_data(json_data, note_id)
                if json_result:
                    logger.info("JSON解析成功，使用JSON数据覆盖HTML解析结果")
                    # JSON data wins: overwrite the HTML-derived values.
                    for key, value in json_result.items():
                        if value:  # overwrite whenever JSON has a value
                            result[key] = value
                            logger.debug(f"JSON覆盖 {key}: {value}")
                else:
                    logger.warning("JSON解析失败，使用HTML解析结果")
            else:
                logger.info("未找到JSON数据，使用HTML解析结果")
            
            return result
            
        except Exception as e:
            logger.error(f"HTML解析异常: {str(e)}")
            return {"错误信息": f"HTML解析异常: {str(e)}"}
    
    def _parse_simple_html(self, html_content, url, note_id):
        """Minimal HTML parse used by the backup fetch path."""
        try:
            result = {}

            # Title from the <title> tag, with site boilerplate filtered out.
            title_tag = re.search(r'<title[^>]*>([^<]+)</title>', html_content)
            if title_tag:
                title = title_tag.group(1).strip()
                # Strip known site suffixes (order matters: longer suffixes
                # contain the shorter one and are handled by its removal).
                for suffix in (' - 小红书', ' - 小红书 - 标记我的生活', '_沪ICP备', ' - 沪ICP备'):
                    title = title.replace(suffix, '')

                # Default / placeholder page titles are not note titles.
                invalid_titles = [
                    '小红书', '小红书 - 标记我的生活', '3 亿人的生活经验，都在小红书',
                    '小红书_沪ICP备', '小红书 - 沪ICP备', '小红书，标记我的生活'
                ]

                if title and not any(bad in title for bad in invalid_titles):
                    if len(title) > 5 and not re.search(r'沪ICP备|备案|小红书官网', title):
                        result["笔记标题"] = title

            # The meta description doubles as body text (and title fallback).
            desc_tag = re.search(r'<meta[^>]*name="description"[^>]*content="([^"]+)"[^>]*>', html_content)
            if desc_tag:
                desc = desc_tag.group(1).strip()
                if desc and desc not in ['3 亿人的生活经验，都在小红书', '小红书，标记我的生活']:
                    if not result.get("笔记标题"):
                        result["笔记标题"] = (desc[:50] + "...") if len(desc) > 50 else desc
                    result["笔记正文"] = desc

            # Pull real counters from meta tags / structured data when present.
            self._extract_meta_and_structured_data(html_content, result)

            # Otherwise fall back to heuristic inference.
            if not any([result.get("点赞数"), result.get("收藏数"), result.get("评论数")]):
                self._extract_fallback_interaction_data(html_content, result)

            # Detect the content form (image / video).
            result["形式"] = self._detect_content_type_html(html_content)

            return result

        except Exception as e:
            logger.error(f"简单解析异常: {str(e)}")
            return {"错误信息": f"简单解析异常: {str(e)}"}
    
    def _extract_json_data(self, html_content):
        """Locate and parse the JSON state embedded in the page HTML.

        Tries several known script-variable patterns; when plain
        ``json.loads`` fails, a heuristic repair pass is attempted via
        ``_fix_json_string``. Returns the parsed object, or None when
        nothing parses.
        """
        try:
            # Look for the initial-state blob using progressively looser patterns.
            patterns = [
                r'window\.__INITIAL_STATE__=(.+?)</script>',  # terminated directly by </script>
                r'window\.__INITIAL_STATE__\s*=\s*(.+?);</script>',  # fallback: semicolon before </script>
                r'__INITIAL_STATE__\s*=\s*(.+?)</script>',  # terminated directly by </script>
                r'window\.__INITIAL_SSR_STATE__\s*=\s*(.+?)</script>',
                r'__NEXT_DATA__["\']?\s*:\s*({.*?})',
            ]
            
            for pattern in patterns:
                matches = re.findall(pattern, html_content, re.DOTALL)
                if matches:
                    json_str = matches[0].strip()
                    
                    # Very large blobs are just logged; parsing proceeds the same way.
                    if len(json_str) > 100000:  # over ~100KB
                        logger.info(f"发现大型JSON数据 ({len(json_str)} 字符)，尝试解析...")
                    
                    try:
                        return json.loads(json_str)
                    except json.JSONDecodeError as e:
                        logger.warning(f"JSON解析失败: {str(e)}, 位置: {e.pos}")
                        
                        # Attempt to repair common JSON breakage before giving up.
                        fixed_json = self._fix_json_string(json_str)
                        if fixed_json:
                            try:
                                return json.loads(fixed_json)
                            # NOTE(review): bare except also swallows
                            # BaseException - json.JSONDecodeError would do.
                            except:
                                continue
            
            return None
        except Exception as e:
            logger.error(f"JSON提取异常: {str(e)}")
            return None
    
    def _fix_json_string(self, json_str):
        """尝试修复损坏的JSON字符串"""
        try:
            # 移除末尾可能的垃圾字符
            json_str = json_str.strip()
            
            # 移除末尾的HTML标签
            if '</script>' in json_str:
                json_str = json_str.split('</script>')[0]
            
            # 修复undefined值
            json_str = json_str.replace('undefined', 'null')
            
            # 如果字符串不以}结尾，可能被截断了
            if not json_str.endswith('}'):
                # 找到最后一个完整的}
                last_brace = json_str.rfind('}')
                if last_brace > 0:
                    json_str = json_str[:last_brace + 1]
            
            # 检查括号平衡
            open_braces = json_str.count('{')
            close_braces = json_str.count('}')
            
            if open_braces > close_braces:
                # 添加缺失的闭合括号
                json_str += '}' * (open_braces - close_braces)
            elif close_braces > open_braces:
                # 移除多余的闭合括号
                extra_braces = close_braces - open_braces
                for _ in range(extra_braces):
                    last_brace = json_str.rfind('}')
                    if last_brace > 0:
                        json_str = json_str[:last_brace] + json_str[last_brace + 1:]
            
            return json_str
            
        except Exception:
            return None
    
    def _parse_json_data(self, json_data, note_id):
        """解析JSON数据"""
        try:
            result = {}
            
            # 尝试多种路径查找笔记数据
            note_data = None
            paths = [
                ['note', 'noteDetailMap', note_id],
                ['noteDetail'],
                ['feed', 'noteDetailMap', note_id],
            ]
            
            for path in paths:
                current = json_data
                try:
                    for key in path:
                        current = current[key]
                    note_data = current
                    break
                except (KeyError, TypeError):
                    continue
            
            if note_data:
                # 检查是否有嵌套的note字段（新的数据结构）
                if isinstance(note_data, dict) and "note" in note_data:
                    note_data = note_data["note"]
                # 提取用户信息
                user_info = note_data.get('user', {})
                if user_info:
                    result["用户名"] = user_info.get('nickname', '')
                
                # 提取笔记信息
                result["笔记标题"] = note_data.get('title', '')
                result["笔记正文"] = note_data.get('desc', '')
                
                # 提取时间
                if note_data.get('time'):
                    result["发布时间"] = self._convert_timestamp(note_data['time'])
                
                # 提取互动信息
                interact_info = note_data.get('interactInfo', {})
                if interact_info:
                    result["点赞数"] = self._safe_get_count(interact_info.get('likedCount', 0))
                    result["收藏数"] = self._safe_get_count(interact_info.get('collectedCount', 0))
                    result["评论数"] = self._safe_get_count(interact_info.get('commentCount', 0))
                    result["分享数"] = self._safe_get_count(interact_info.get('shareCount', 0))
                
                # 提取标签
                tag_list = note_data.get('tagList', [])
                if tag_list:
                    tags = []
                    for tag in tag_list:
                        if isinstance(tag, dict):
                            tag_name = tag.get('name', '')
                            if tag_name:
                                # 清理标签格式
                                cleaned_tag = tag_name.replace('[话题]', '').replace('﻿', '').strip()
                                if cleaned_tag:
                                    tags.append(cleaned_tag)
                        elif isinstance(tag, str):
                            # 清理标签格式
                            cleaned_tag = tag.replace('[话题]', '').replace('﻿', '').strip()
                            if cleaned_tag:
                                tags.append(cleaned_tag)
                    
                    # 去重并保持顺序
                    seen = set()
                    unique_tags = []
                    for tag in tags:
                        if tag not in seen:
                            seen.add(tag)
                            unique_tags.append(tag)
                    
                    result["笔记标签"] = unique_tags
            
            return result
            
        except:
            return None
    
    def _extract_tags(self, note_card):
        """提取标签"""
        tags = []
        try:
            # 从tag_list提取
            tag_list = note_card.get('tag_list', [])
            for tag in tag_list:
                if isinstance(tag, dict):
                    tag_name = tag.get('name', '')
                    if tag_name:
                        # 清理标签格式
                        cleaned_tag = tag_name.replace('[话题]', '').replace('﻿', '').strip()
                        if cleaned_tag:
                            tags.append(cleaned_tag)
                elif isinstance(tag, str):
                    # 清理标签格式
                    cleaned_tag = tag.replace('[话题]', '').replace('﻿', '').strip()
                    if cleaned_tag:
                        tags.append(cleaned_tag)
            
            # 从desc提取话题标签
            desc = note_card.get('desc', '')
            if desc:
                hashtags = re.findall(r'#([^#\s]+)', desc)
                for hashtag in hashtags:
                    # 清理标签格式
                    cleaned_tag = hashtag.replace('[话题]', '').replace('﻿', '').strip()
                    if cleaned_tag:
                        tags.append(cleaned_tag)
            
            # 去重并保持顺序
            seen = set()
            unique_tags = []
            for tag in tags:
                if tag not in seen:
                    seen.add(tag)
                    unique_tags.append(tag)
            
            return unique_tags
        except:
            return []
    
    def _convert_timestamp(self, timestamp):
        """转换时间戳"""
        try:
            if isinstance(timestamp, str):
                timestamp = float(timestamp)
            if timestamp > 1e12:  # 毫秒时间戳
                timestamp = timestamp / 1000
            return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
        except:
            return ''
    
    def _safe_get_count(self, value):
        """安全获取计数值，保持小红书原始显示格式如'1.8万'、'1千+'等"""
        try:
            if not value:
                return ""
            
            # 如果是数字类型且为0，返回空
            if isinstance(value, (int, float)) and value == 0:
                return ""
            elif isinstance(value, (int, float)):
                return str(int(value)) if value > 0 else ""
            
            value_str = str(value).strip()
            if not value_str or value_str == '0':
                return ""
            
            # 直接返回原始字符串格式，保持小红书的显示方式
            # 比如 "1.8万"、"1万+"、"1千+"、"10+" 等
            return value_str
            
        except Exception as e:
            logger.debug(f"计数转换失败: {value} -> {e}")
            return ""
    
    def _extract_meta_and_structured_data(self, html_content, result):
        """从Meta标签和结构化数据中提取信息"""
        try:
            # 1. 从Open Graph标签中提取
            og_patterns = {
                "笔记标题": r'<meta[^>]*property="og:title"[^>]*content="([^"]+)"',
                "笔记正文": r'<meta[^>]*property="og:description"[^>]*content="([^"]+)"',
            }
            
            for field, pattern in og_patterns.items():
                if not result.get(field):
                    match = re.search(pattern, html_content, re.IGNORECASE)
                    if match:
                        value = match.group(1).strip()
                        if value and "小红书" not in value:
                            result[field] = value
            
            # 2. 从JSON-LD结构化数据中提取
            json_ld_match = re.search(r'<script[^>]*type="application/ld\+json"[^>]*>([^<]+)</script>', html_content)
            if json_ld_match:
                try:
                    json_data = json.loads(json_ld_match.group(1))
                    if isinstance(json_data, dict):
                        if not result.get("笔记标题"):
                            result["笔记标题"] = json_data.get("name", "").strip()
                        if not result.get("用户名"):
                            author = json_data.get("author", {})
                            if isinstance(author, dict):
                                result["用户名"] = author.get("name", "").strip()
                except:
                    pass
            
            # 3. 从特定的数据属性中提取
            data_patterns = {
                "点赞数": [
                    r'data-like[^>]*=["\']*(\d+)["\']*',
                    r'like[^>]*count[^>]*["\'](\d+)["\']',
                    r'点赞[^>]*(\d+)',
                ],
                "收藏数": [
                    r'data-collect[^>]*=["\']*(\d+)["\']*',
                    r'collect[^>]*count[^>]*["\'](\d+)["\']',
                    r'收藏[^>]*(\d+)',
                ],
                "评论数": [
                    r'data-comment[^>]*=["\']*(\d+)["\']*',
                    r'comment[^>]*count[^>]*["\'](\d+)["\']',
                    r'评论[^>]*(\d+)',
                ],
            }
            
            for field, patterns in data_patterns.items():
                if not result.get(field):
                    for pattern in patterns:
                        matches = re.findall(pattern, html_content, re.IGNORECASE)
                        if matches:
                            # 取最大值作为可能的真实数据
                            max_val = max(int(m) for m in matches if m.isdigit())
                            if max_val > 0:
                                result[field] = str(max_val)
                                break
                                
        except Exception as e:
            self.logger.warning(f"Meta数据提取异常: {str(e)}")
    
    def _extract_fallback_interaction_data(self, html_content, result):
        """备用的交互数据提取方法"""
        try:
            # 尝试从可能包含真实数据的脚本中提取
            # 寻找可能的API调用或数据初始化
            script_matches = re.findall(r'<script[^>]*>([^<]*(?:like|comment|collect|share)[^<]*)</script>', html_content, re.IGNORECASE)
            
            for script in script_matches:
                # 查找脚本中的数字数据
                numbers = re.findall(r'\b(\d{1,6})\b', script)
                if numbers:
                    # 过滤合理范围的数字
                    valid_numbers = [int(n) for n in numbers if 1 <= int(n) <= 999999]
                    if valid_numbers:
                        self.logger.info(f"从脚本中找到可能的交互数据: {valid_numbers}")
                        # 这里可以根据数字的大小和出现位置进行智能推断
                        break
            
            # 如果还是没有数据，给出提示
            if not any([result.get("点赞数"), result.get("收藏数"), result.get("评论数")]):
                self.logger.warning("无法从页面提取真实的交互数据，页面可能需要JavaScript渲染")
                # 不设置虚假的默认值，保持为空字符串
                
        except Exception as e:
            self.logger.warning(f"备用交互数据提取异常: {str(e)}")
    
    def _check_missing_fields(self, result):
        """检查哪些重要字段未获取到"""
        important_fields = ["用户名", "发布时间", "分享数"]
        missing = []
        
        for field in important_fields:
            if not result.get(field):
                missing.append(field)
        
        return missing
    
    def _detect_content_type_api(self, note_card):
        """从API数据中检测内容类型"""
        try:
            # 方法1: 检查video字段
            video_info = note_card.get('video', {})
            if video_info and isinstance(video_info, dict):
                # 如果有video字段且不为空，很可能是视频
                if video_info.get('media') or video_info.get('stream') or video_info.get('consumer'):
                    return "视频"
            
            # 方法2: 检查type字段
            note_type = note_card.get('type', '')
            if note_type == 'video':
                return "视频"
            
            # 方法3: 检查image_list和video同时存在的情况
            image_list = note_card.get('image_list', [])
            if image_list and video_info:
                # 如果既有图片又有视频信息，优先判断为视频
                if video_info.get('media') or video_info.get('stream'):
                    return "视频"
            
            # 方法4: 只有图片列表，判断为图文
            if image_list and not video_info:
                return "图文"
            
            # 如果都没有明确标识，根据其他线索判断
            # 默认返回空，让其他方法来判断
            return ""
            
        except Exception as e:
            logger.debug(f"API内容类型检测异常: {str(e)}")
            return ""
    
    def _detect_content_type_html(self, html_content):
        """从HTML内容中检测内容类型 - 优化版本，重点关注特定区域"""
        try:
            # 方法1: 优先检查meta标签中的og:type（最可靠）
            meta_type_match = re.search(r'<meta[^>]*name="og:type"[^>]*content="([^"]+)"[^>]*>', html_content, re.IGNORECASE)
            if meta_type_match:
                og_type = meta_type_match.group(1).lower()
                if og_type == 'video':
                    return "视频"
            
            # 方法2: 检查meta标签中的og:video（视频内容特有）
            video_meta_match = re.search(r'<meta[^>]*name="og:video"[^>]*content="([^"]+)"[^>]*>', html_content, re.IGNORECASE)
            if video_meta_match:
                return "视频"
            
            # 方法3: 检查data-type属性
            data_type_match = re.search(r'data-type="([^"]*)"', html_content)
            if data_type_match:
                data_type = data_type_match.group(1).lower()
                if data_type == 'video':
                    return "视频"
                elif data_type == '' and not video_meta_match:
                    # data-type为空且没有og:video，很可能是图文
                    pass
            
            # 方法4: 重点分析window.__INITIAL_STATE__中的JSON数据
            json_match = re.search(r'window\.__INITIAL_STATE__=(.+?);</script>', html_content, re.DOTALL)
            if json_match:
                try:
                    import json
                    json_data = json.loads(json_match.group(1))
                    
                    # 检查note数据结构
                    note_data = json_data.get('note', {}).get('noteDetailMap', {})
                    if note_data:
                        # 获取第一个笔记数据
                        note_content = list(note_data.values())[0] if note_data else {}
                        
                        # 检查note.type字段
                        note_type = note_content.get('type', '')
                        if note_type == 'video':
                            return "视频"
                        
                        # 检查video字段是否存在且有实际内容
                        video_info = note_content.get('video', {})
                        if video_info and isinstance(video_info, dict):
                            # 检查是否有media数据
                            if video_info.get('media') and video_info['media'].get('video'):
                                return "视频"
                        
                        # 检查imageList字段
                        image_list = note_content.get('imageList', [])
                        if image_list and len(image_list) > 0 and not video_info:
                            return "图文"
                            
                except (json.JSONDecodeError, KeyError, IndexError):
                    # JSON解析失败，继续其他方法
                    pass
            
            # 方法5: 检查关键的视频相关字段
            # 检查是否有视频流URL
            if re.search(r'"masterUrl"\s*:\s*"[^"]*\.mp4"', html_content):
                return "视频"
            
            # 检查视频时长字段
            if re.search(r'"duration"\s*:\s*\d+', html_content) and re.search(r'"videoId"', html_content):
                return "视频"
            
            # 方法6: 统计关键特征（基于分析结果的阈值）
            # 视频内容特征：mp4出现多次，有video相关字段
            mp4_count = len(re.findall(r'mp4', html_content, re.IGNORECASE))
            video_count = len(re.findall(r'"video"', html_content, re.IGNORECASE))
            stream_count = len(re.findall(r'stream', html_content, re.IGNORECASE))
            
            # 基于分析结果：视频内容mp4出现18次，图文只有2次
            if mp4_count >= 10 and video_count >= 3:
                return "视频"
            elif mp4_count <= 5 and stream_count <= 15:
                # 检查是否有imageList
                if re.search(r'"imageList"\s*:\s*\[', html_content):
                    return "图文"
            
            # 如果都无法判断，返回空字符串
            return ""
            
        except Exception as e:
            logger.debug(f"HTML内容类型检测异常: {str(e)}")
            return ""


# 使用示例
def get_xiaohongshu_note_info(url, cookie_string=None):
    """
    Public convenience wrapper: fetch Xiaohongshu note info in a single call.

    Creates a fresh XiaohongshuAPI client per call and delegates to it.

    Args:
        url (str): link to the Xiaohongshu note
        cookie_string (str, optional): cookie string for authenticated access

    Returns:
        dict: note fields plus an error-message entry
    """
    client = XiaohongshuAPI()
    return client.get_note_info(url, cookie_string)


if __name__ == "__main__":
    # 测试代码
    test_url = "https://www.xiaohongshu.com/explore/678f1e8200000000180053e1?xsec_token=WB-GbB2Qkxfu8WV7sq-4ZwOYQzlzEBBbutUZsQ96ZdVTk=&xsec_source=pc_wind_export"
    # test_cookie = "请在这里填入你的Cookie" # 为安全起见已注释，请用户自行提供
    test_cookie = None
    
    print("🚀 开始测试小红书笔记信息获取...")
    print(f"📋 测试URL: {test_url}")
    print("="*80)
    
    if test_cookie:
        result = get_xiaohongshu_note_info(test_url, test_cookie)
    else:
        print("❌ 测试Cookie未提供，请在代码中填入你的Cookie后再测试")
        exit(1)
    
    print("📊 获取结果:")
    for key, value in result.items():
        if key == "笔记标签" and isinstance(value, list):
            print(f"  {key}: {', '.join(value)}")
        else:
            print(f"  {key}: {value}")
    
    if result.get("错误信息"):
        print(f"\n❌ 发生错误: {result['错误信息']}")
    else:
        print(f"\n✅ 获取成功!") 