from bot_api_v1.app.core.context import request_ctx
from bot_api_v1.app.utils.media_extrat_format import Media_extract_format
from bot_api_v1.app.services.business.media_service import MediaService
from bot_api_v1.app.core.logger import logger
from bot_api_v1.app.core.cache import async_cache_result
from bot_api_v1.app.core.config import settings
# Ensure AuthorInfo is imported if you attempt to construct it, though current logic sets it to None
from bot_api_v1.app.core.schemas import RequestContext, SearchNoteRequest, SearchNoteData, AuthorInfo
from fastapi import HTTPException, Request, status
from typing import Any, Dict, Optional, List
import datetime # Changed from 'import datetime' to 'from datetime import datetime, timedelta' for direct use
from datetime import datetime, timedelta # Added for timedelta
from datetime import datetime, timezone # 确保导入 timezone
from pydantic import HttpUrl # 确保导入 HttpUrl
from bot_api_v1.app.constants.media_info import MediaPlatform

import traceback # 在文件顶部导入
import json


# These would be instantiated as they are in your existing code
media_service = MediaService()  # shared service instance for platform API calls (profiles, keyword search)
formatter = Media_extract_format()  # shared helper; used here to identify the platform from a user URL
# logger = logger # Assuming logger is already configured and available



# --- Helper: build an AuthorInfo from Douyin item data ---
def create_author_from_douyin_data(douyin_item: Dict[str, Any]) -> Optional[AuthorInfo]:
    """Build an AuthorInfo from a single Douyin keyword-search item.

    Returns None when the item is not a dict, has no 'uid', or AuthorInfo
    validation fails.
    """
    if not isinstance(douyin_item, dict):
        return None

    author_id = str(douyin_item.get('uid', ""))
    if not author_id:  # without a uid we cannot build meaningful author info
        return None

    # Author profile URL (generic Douyin format) — only when sec_uid is present.
    author_url = f"https://www.douyin.com/user/{douyin_item.get('sec_uid', '')}" if douyin_item.get('sec_uid') else ""

    # Douyin keyword-search items usually do not carry a standalone avatar URL
    # (only nickname/id; a dedicated author-info endpoint would provide more),
    # so 'avatar' is left empty. Follower counts etc. fall back to the
    # AuthorInfo schema defaults since the search payload does not include them.
    try:
        author = AuthorInfo(
            id=author_id,
            sec_uid=str(douyin_item.get('sec_uid', "")),
            nickname=str(douyin_item.get('nickname', douyin_item.get('mark', ""))),  # prefer nickname, fall back to mark
            avatar="",  # not provided by keyword-search results; empty by design
            signature=str(douyin_item.get('signature', "")),
            url=author_url,
        )
        return author
    except Exception as e:
        # Fix: report through the configured logger (with stack trace) instead of print().
        logger.error(f"创建抖音 AuthorInfo 失败: {e} for item: {douyin_item.get('id')}", exc_info=True)
        return None

# --- Main transform: convert a single Douyin item into SearchNoteData ---
def _transform_douyin_item_to_search_note_data(
    douyin_item: Dict[str, Any], 
    platform_name: str,  # e.g. "douyin"
    default_point_cost: int = 1  # default point cost charged per note
) -> Optional[SearchNoteData]:
    """Convert one raw Douyin keyword-search item into a SearchNoteData model.

    Returns None when the input is not a dict or conversion/validation fails.
    """
    if not isinstance(douyin_item, dict):
        return None

    try:
        # --- publish time ---
        publish_time_dt: Optional[datetime] = None
        create_timestamp = douyin_item.get('create_timestamp')
        if isinstance(create_timestamp, (int, float)):
            # assumes create_timestamp is a seconds-level epoch value — TODO confirm
            publish_time_dt = datetime.fromtimestamp(create_timestamp, tz=timezone.utc)  # timezone-aware
        elif isinstance(douyin_item.get('create_time'), str):
            try:
                # Try the "YYYY-MM-DD HH:MM:SS" format.
                publish_time_dt = datetime.strptime(douyin_item['create_time'], '%Y-%m-%d %H:%M:%S')
                # If needed, localize or convert to UTC: .replace(tzinfo=timezone.utc)
            except ValueError:
                # Fix: log via the configured logger instead of print().
                logger.warning(f"无法解析抖音 create_time 字符串: {douyin_item['create_time']}")
                # publish_time_dt stays None on parse failure

        # --- duration: "HH:MM:SS" / "MM:SS" / "SS" -> total seconds ---
        duration_seconds = 0
        duration_str = douyin_item.get('duration', "00:00:00")
        if isinstance(duration_str, str):
            try:
                parts = list(map(int, duration_str.split(':')))
            except ValueError:
                # Fix: a malformed duration no longer raises into the outer
                # except (which would drop the whole note); degrade to 0.
                parts = []
            if len(parts) == 3:  # HH:MM:SS
                duration_seconds = parts[0] * 3600 + parts[1] * 60 + parts[2]
            elif len(parts) == 2:  # MM:SS
                duration_seconds = parts[0] * 60 + parts[1]
            elif len(parts) == 1:  # SS
                duration_seconds = parts[0]

        # --- tags: 'text_extra' preferred, 'tag' as fallback ---
        tags_list = douyin_item.get('text_extra', [])
        if not isinstance(tags_list, list):  # some APIs may return a plain string
             tags_list = str(tags_list).split(',')  # simple split handling
        if not tags_list and isinstance(douyin_item.get('tag'), list):  # fallback field
            tags_list = douyin_item.get('tag', [])

        tags_str = ", ".join(filter(None, map(str, tags_list)))

        # --- author ---
        author_instance = create_author_from_douyin_data(douyin_item)

        # Video URL: prefer 'downloads'; otherwise build from the item id.
        video_url_to_use = str(douyin_item.get('downloads', ""))
        if not video_url_to_use and douyin_item.get('uri'):  # 'uri' identifies the video file
            # The real playback URL may require a more involved construction;
            # this is a generic placeholder based on the item id.
            video_url_to_use = f"https://www.douyin.com/video/{douyin_item.get('id')}"

        # Platform share URL.
        platform_url = str(douyin_item.get('share_url', ""))
        if not platform_url and douyin_item.get('id'):
            platform_url = f"https://www.douyin.com/video/{douyin_item.get('id')}"

        note_data = SearchNoteData(
            title=str(douyin_item.get('desc', "")),
            platform=platform_name,
            description=str(douyin_item.get('desc', "")),  # Douyin uses 'desc' as both title and description
            publish_time=publish_time_dt,
            play_count=int(douyin_item.get('play_count', 0)) if douyin_item.get('play_count', -1) > -1 else 0,  # -1 means unknown -> 0
            like_count=int(douyin_item.get('digg_count', 0)),
            comment_count=int(douyin_item.get('comment_count', 0)) if douyin_item.get('comment_count', -1) > -1 else 0,
            share_count=int(douyin_item.get('share_count', 0)) if douyin_item.get('share_count', -1) > -1 else 0,
            collect_count=int(douyin_item.get('collect_count', 0)),
            author=author_instance,
            tags=tags_str,
            video_platform_url=platform_url,
            duration=duration_seconds,
            video_id=str(douyin_item.get('id', "")),
            video_url=video_url_to_use,
            cover_url=str(douyin_item.get('origin_cover', douyin_item.get('dynamic_cover', ""))),  # prefer origin_cover
            point_cost=default_point_cost 
        )
        return note_data
    except Exception as e:
        # Fix: report through the logger (with stack trace) instead of print().
        logger.error(f"转换抖音条目 (ID: {douyin_item.get('id')}) 失败: {e}", exc_info=True)
        return None

def create_author_from_api_user_data(user_data: Optional[Dict[str, Any]]) -> Optional[AuthorInfo]:
    """Build an AuthorInfo from an XHS API 'user' dict.

    Returns None when the input is missing/not a dict or validation fails.
    """
    if not user_data or not isinstance(user_data, dict):
        return None

    # Prefer 'nick_name'; fall back to 'nickname' when it is absent.
    name_to_use = user_data.get('nick_name', user_data.get('nickname'))

    # Profile URL: prefer an explicit 'url' field from the API; otherwise build
    # the generic Xiaohongshu profile URL from user_id.
    author_url = user_data.get('url')
    if not author_url and user_data.get('user_id'):
        author_url = f"https://www.xiaohongshu.com/user/profile/{user_data.get('user_id')}"
    
    # Fields defined on AuthorInfo but absent from user_data fall back to the
    # defaults declared in the AuthorInfo schema (Pydantic handles this).
    try:
        author = AuthorInfo(
            id=str(user_data.get('user_id', "")),  # ensure id is a string
            nickname=str(name_to_use or ""),  # ensure nickname is a string
            avatar=str(user_data.get('avatar', "")),  # ensure avatar is a string
            url=str(author_url or ""),  # ensure url is a string
            # The fields below are absent from the basic 'user' sample; map them
            # here if the full API response includes them.
            sec_uid=user_data.get('sec_uid'),
            signature=user_data.get('signature'),
            verified=user_data.get('verified'),
            follower_count=int(user_data.get('follower_count', 0)) if user_data.get('follower_count') is not None else 0,
            following_count=int(user_data.get('following_count', 0)) if user_data.get('following_count') is not None else 0,
            total_likes_collections_count=int(user_data.get('total_likes_collections_count', 0)) if user_data.get('total_likes_collections_count') is not None else 0,
            total_post_count=int(user_data.get('total_post_count', 0)) if user_data.get('total_post_count') is not None else 0,
            region=user_data.get('region')
        )
        return author
    except Exception as e:
        # Fix: report through the configured logger (with stack trace) instead of print().
        logger.error(f"Error creating AuthorInfo: {e}", exc_info=True)
        return None
# Helper function to parse relative time text from XHS
def _parse_publish_time_text(time_text: str, current_year: int) -> Optional[datetime]:
    """
    解析小红书相对时间文本 (如 "05-22", "6天前") 为 datetime 对象。
    Returns None if parsing fails.
    """
    now = datetime.now()
    if not time_text:
        return None
        
    if "天前" in time_text:
        try:
            days_ago = int(time_text.replace("天前", "").strip())
            return now - timedelta(days=days_ago)
        except ValueError:
            logger.warning(f"无法解析天数格式的发布时间: {time_text}")
    elif "-" in time_text:  # 假设是 "MM-DD"
        try:
            parts = time_text.split("-")
            if len(parts) == 2:
                month, day = map(int, parts)
                year_to_use = current_year
                # Simple heuristic for year: if parsed month/day is in the future relative to now, assume last year
                # A more robust solution might be needed for edge cases around year-end.
                temp_date_this_year = datetime(year_to_use, month, day)
                if temp_date_this_year > now : 
                    year_to_use = current_year - 1
                return datetime(year_to_use, month, day)
        except ValueError:
            logger.warning(f"无法解析MM-DD格式的发布时间: {time_text}")
    
    logger.warning(f"未知或无法处理的发布时间格式: {time_text}, 返回 None。")
    return None


# Helper function to transform a single XHS raw item
def _transform_xhs_item_to_search_note_data(
    raw_item: Dict[str, Any],
    platform_name: str,
    default_point_cost: int = 1 # Example default, should come from settings or config
) -> Optional[SearchNoteData]:
    """
    Convert a single raw Xiaohongshu item (from the xhs_data list) into a
    SearchNoteData Pydantic model.
    Relies on SearchNoteData fields having defaults in the schema; returns
    None when 'note_card' is missing or conversion fails.
    """
    try:
        note_card = raw_item.get('note_card')
        if not isinstance(note_card, dict): # Check if note_card is a dictionary
            logger.warning(f"原始数据中缺少 'note_card' 或格式不正确: {raw_item.get('id')}")
            return None

        data_to_construct = {} # Start with an empty dict; schema defaults fill the rest

        # --- basic info ---
        video_id = raw_item.get('id')
        if video_id:
            data_to_construct['video_id'] = str(video_id)
        
        title = note_card.get('display_title')
        if title:
            data_to_construct['title'] = str(title)
            # Use title as fallback for description if description is empty or not available
            data_to_construct['description'] = str(title)


        data_to_construct['platform'] = platform_name
        data_to_construct['author'] = create_author_from_api_user_data(note_card.get('user'))


        # --- publish time (parsed from corner_tag_info entries of type 'publish_time') ---
        publish_time_obj = None
        corner_tag_info_list = note_card.get('corner_tag_info')
        if isinstance(corner_tag_info_list, list): # Check if corner_tag_info is a list
            current_processing_year = datetime.now().year
            for tag_info in corner_tag_info_list:
                if isinstance(tag_info, dict) and tag_info.get('type') == 'publish_time' and tag_info.get('text'):
                    publish_time_obj = _parse_publish_time_text(tag_info['text'], current_processing_year)
                    if publish_time_obj:
                        break # Found and parsed
        if publish_time_obj: # Only add if successfully parsed
            data_to_construct['publish_time'] = publish_time_obj
        # If not parsed, SearchNoteData.publish_time will default to None as per schema.

        # --- interaction counts ---
        interact_info = note_card.get('interact_info')
        if isinstance(interact_info, dict): # Check if interact_info is a dictionary
            try:
                data_to_construct['like_count'] = int(interact_info.get('liked_count', 0))
                data_to_construct['comment_count'] = int(interact_info.get('comment_count', 0))
                data_to_construct['share_count'] = int(interact_info.get('shared_count', 0))
                data_to_construct['collect_count'] = int(interact_info.get('collected_count', 0))
            except (ValueError, TypeError) as e:
                # NOTE: counts assigned before the failure remain set; the rest
                # fall back to schema defaults.
                logger.warning(f"转换互动数失败 for note_id {video_id}: {e}, Pydantic defaults will be used.")
        
        # play_count defaults to 0 in schema, XHS note search data doesn't provide it.
        # tags defaults to "" in schema, XHS note search data doesn't provide it directly.
        # duration defaults to 0 in schema, XHS note search data (for images) doesn't provide it.

        # --- note link (video_platform_url) ---
        xsec_token_note = raw_item.get('xsec_token')
        if video_id: # video_id must exist for a valid URL
            base_url = f"https://www.xiaohongshu.com/explore/{video_id}"
            if xsec_token_note:
                data_to_construct['video_platform_url'] = f"{base_url}?xsec_token={xsec_token_note}&xsec_source="
            else:
                data_to_construct['video_platform_url'] = base_url
        
        # --- video/cover URLs ---
        # video_url defaults to "" in schema.
        # For XHS image notes, this could be the URL of the primary image.
        cover_info = note_card.get('cover')
        if isinstance(cover_info, dict) and cover_info.get('url_default'): # Check if cover_info is a dictionary
            data_to_construct['cover_url'] = str(cover_info['url_default'])

        image_list = note_card.get('image_list')
        if isinstance(image_list, list) and image_list: # Check if image_list is a non-empty list
            first_image_info_list = image_list[0].get('info_list')
            if isinstance(first_image_info_list, list) and first_image_info_list: # Check further
                first_image_url_info = first_image_info_list[0]
                if isinstance(first_image_url_info, dict) and first_image_url_info.get('url'):
                     # If video_url is meant to be the main content URL for image posts
                    data_to_construct['video_url'] = str(first_image_url_info['url'])


        # --- point cost ---
        data_to_construct['point_cost'] = default_point_cost

        # --- build the SearchNoteData object ---
        # Pydantic will use defaults from schema for any key not in data_to_construct
        return SearchNoteData(**data_to_construct)

    except Exception as e:
        note_id_for_log = raw_item.get('id', '未知ID')
        logger.error(f"转换小红书数据项 (ID: {note_id_for_log}) 失败: {e}", exc_info=True)
        return None



class UserProfileHelper:
    """Helpers for fetching platform user profiles and keyword note searches.

    Uses the module-level `media_service` and `formatter` instances; profile
    lookups are cached via `async_cache_result`.
    """

    def __init__(self) -> None:
        # Consider initializing media_service and formatter here if they are instance-specific
        # For now, using the module-level instances as per your existing code.
        pass
    
    
    # This method is correctly defined as part of the class
    def _parse_int_safe(self, value: Any, default: int = 0) -> int:
        """Safely convert a value to int, returning a default if conversion fails."""
        if value is None:
            return default
        try:
            return int(value)
        except (ValueError, TypeError):
            return default

    async def _unify_profile_data(self, raw_result: Dict, platform: str, user_url: str) -> Optional[Dict[str, Any]]:
        """Normalize a platform-specific raw profile payload into a unified dict.

        `raw_result` is assumed to be the content of the API's 'data' field.
        Returns None when the payload shape is unexpected or the platform is
        unknown.
        """
        unified_data: Dict[str, Any] = {
            "platform": platform,
            "user_id_platform": None,
            "nickname": None,
            "avatar_url": None,
            "bio_signature": None,
            "follower_count": 0,
            "following_count": 0,
            "total_engagement": 0,
            "post_count": 0,
            "ip_location": None,
            "original_url": user_url
            # Removed "raw_data" from here as it was not in your original unified_data init
            # but you might want to add it back if it's part of your intended unified structure.
        }

        data = raw_result # Assuming raw_result is the content of the API's 'data' field

        try:
            if platform == "douyin":
                if not isinstance(data, dict):
                    logger.error(f"Unification: Douyin data is not a dict: {type(data)}", extra={"user_url": user_url})
                    return None
                unified_data.update({
                    "user_id_platform": data.get("sec_uid") or data.get("uid"),
                    "nickname": data.get("nickname"),
                    "avatar_url": data.get("avatar"),
                    "bio_signature": data.get("signature"),
                    # CORRECTED CALLS:
                    "follower_count": self._parse_int_safe(data.get("follower_count")),
                    "following_count": self._parse_int_safe(data.get("following_count")),
                    "total_engagement": self._parse_int_safe(data.get("total_favorited")),
                    "post_count": self._parse_int_safe(data.get("aweme_count")),
                    # strip the "IP属地：" prefix Douyin prepends to the location
                    "ip_location": str(data.get("ip_location", "")).replace("IP属地：", "").strip() or None,
                })
            elif platform == "xiaohongshu":
                # Observed logs suggest raw_result arrives already parsed (a dict),
                # but a JSON string is still handled defensively below.
                if isinstance(data, str): # This case might not be hit if media_service already parses XHS string
                    try:
                        data = json.loads(data)
                    except json.JSONDecodeError:
                        logger.error(f"Unification: Xiaohongshu data string is not valid JSON.", extra={"user_url": user_url, "data_string": data})
                        return None
                
                if not isinstance(data, dict) or not data.get("basic_info"):
                    logger.error(f"Unification: Xiaohongshu parsed data is not a dict or missing 'basic_info': {type(data)} ({data})", extra={"user_url": user_url})
                    return None

                basic_info = data.get("basic_info", {})
                interactions_list = data.get("interactions", [])
                # CORRECTED CALL:
                interactions_map = {item.get("type"): self._parse_int_safe(item.get("count")) for item in interactions_list if item.get("type")}

                unified_data.update({
                    "user_id_platform": basic_info.get("red_id"),
                    "nickname": basic_info.get("nickname"),
                    "avatar_url": basic_info.get("images"),
                    "bio_signature": basic_info.get("desc"),
                    "follower_count": interactions_map.get("fans", 0),
                    "following_count": interactions_map.get("follows", 0),
                    "total_engagement": interactions_map.get("interaction", 0),
                    # CORRECTED CALL:
                    "post_count": self._parse_int_safe(data.get("notes_count")), # Assuming 'notes_count' exists
                    "ip_location": basic_info.get("ip_location"),
                })
            else:
                logger.warning(f"Unification: Unknown platform '{platform}'", extra={"user_url": user_url})
                return None

        except Exception as e:
            tb_str = traceback.format_exc()
            logger.error(f"Unification: Error during data extraction for {platform}: {str(e)}\nTraceback:\n{tb_str}", extra={"user_url": user_url})
            return None

        return unified_data

    @async_cache_result(expire_seconds=600, prefix="user_profile_helper")
    async def get_user_profile_logic(self, user_url: str):
        """Fetch a user profile via media_service and unify it.

        Returns (unified_profile_or_None, log_extra); re-raises on unexpected
        errors. Results are cached for 600 seconds.
        """
        trace_key = request_ctx.get_trace_key()
        user_id = request_ctx.get_cappa_user_id()
        app_id = request_ctx.get_app_id()
        platform = formatter._identify_platform(user_url) # Assuming formatter is defined
        log_extra = {"request_id": trace_key, "user_id": user_id, "app_id": app_id, "platform": platform, "user_url": user_url}
        
        try:
            raw_result = await media_service.get_user_profile(user_url, log_extra)
            if not raw_result: 
                logger.error(f"get_user_profile_logic-media_service returned None for {user_url}", extra=log_extra)
                return None, log_extra

            unified_profile = await self._unify_profile_data(raw_result, platform, user_url)

            if unified_profile is None:
                logger.warning(f"get_user_profile_logic-未能统一用户主页信息 for {user_url}", extra=log_extra)
                return None, log_extra 

            return unified_profile, log_extra
        except Exception as e:
            # Capture the full stack trace for the log entry
            tb_str = traceback.format_exc()
            logger.error(f"get_user_profile_logic-获取用户主页信息失败: {str(e)}\nTraceback:\n{tb_str}", extra=log_extra)
            raise # re-raise the original exception

    @async_cache_result(expire_seconds=600,prefix="user_profile_helper")
    async def get_user_full_info(self, user_url: str):
        """Fetch full user info for the platform identified from user_url.

        Delegates to media_service; re-raises on failure. Cached for 600 seconds.
        """
        trace_key = request_ctx.get_trace_key()
        user_id = request_ctx.get_cappa_user_id()
        app_id = request_ctx.get_app_id()
        platform = formatter._identify_platform(user_url)
        log_extra = {"request_id": trace_key, "user_id": user_id, "app_id": app_id, "platform": platform, "user_url": user_url}
        
        try:
            result = await media_service.async_get_user_full_info(platform,user_url, log_extra)
            return result
        except Exception as e:
            logger.error(f"get_user_full_info-获取用户主页信息失败: {str(e)}", extra=log_extra)
            raise

    async def search_note_by_kword_logic(self, 
        req: Request,
        s_req: SearchNoteRequest
    ) -> tuple[RequestContext, List[SearchNoteData], Dict[str, Any]]: # Added return type hint
        """Search platform notes by keyword and transform them per platform.

        Returns (request_context, transformed_notes, log_extra). Records total
        consumed points on request_ctx; raises HTTPException on errors.
        """
        trace_key = request_ctx.get_trace_key()
        app_id = request_ctx.get_app_id()
        source = request_ctx.get_source()
        user_id = request_ctx.get_cappa_user_id()
        user_name = request_ctx.get_user_name()
        ip_address = req.client.host if req.client else "unknown_ip"
        # root_trace_key = request_ctx.get_root_trace_key() # Defined but not used in RequestContext below

        # Corrected timestamp to be a datetime object
        request_context = RequestContext(
            trace_id=trace_key, 
            app_id=app_id, 
            user_id=user_id, 
            source=source, 
            user_name=user_name, 
            ip=ip_address,
            timestamp=datetime.now() # Pass datetime object
        )
        log_extra = {"request_id": trace_key, "user_id": user_id, "app_id": app_id, "platform": s_req.platform, "query": s_req.query}
        
        logger.info_to_db(f"收到平台笔记搜索请求: platform={s_req.platform}, query={s_req.query}, num={s_req.num}, sort={s_req.qsort}", extra=log_extra)
        
        processed_notes: List[SearchNoteData] = []
        total_points_consumed = 0 # Initialize total points

        try:
            if not s_req.platform or not s_req.query:
                logger.warning(f"平台笔记搜索失败: platform或query参数缺失.", extra=log_extra)
                # Return empty list for notes as per schema, and calculated context
                # No points consumed if request is invalid before service call.
                request_ctx.set_consumed_points(0)
                # Consider raising HTTPException for bad request here
                # raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="platform and query are required")
                # For now, returning empty list as per previous structure implies success with no data
                return request_context, [], log_extra


            # Call media_service to get raw search results
            # This is assumed to be List[Dict[str, Any]]
            raw_note_list_from_service = await media_service.search_note_by_kword(
                trace_key, s_req.platform, s_req.query, s_req.num, s_req.qsort, log_extra
            )
            logger.info_to_db(f"平台笔记搜索成功: platform={s_req.platform}, query={s_req.query}, result_count={len(raw_note_list_from_service) if raw_note_list_from_service else 0}", extra=log_extra)
            
            if not raw_note_list_from_service:
                logger.warning(f"平台笔记搜索没找到结果: platform={s_req.platform}, query={s_req.query}", extra=log_extra)
                request_ctx.set_consumed_points(0) # No results, no points consumed beyond a base if applicable
                return request_context, [], log_extra # Return empty list for notes


            if raw_note_list_from_service:
                # Dispatch to a platform-specific transform for each raw item.
                current_platform = s_req.platform # raw platform string; compared against MediaPlatform constants below

                for raw_item in raw_note_list_from_service:
                    transformed_note: Optional[SearchNoteData] = None
                    # Default per-item point cost; must be supplied here if
                    # SearchNoteData declares no default for point_cost.
                    point_cost_for_item = settings.DEFAULT_NOTE_POINT_COST if hasattr(settings, 'DEFAULT_NOTE_POINT_COST') else 1


                    if current_platform == MediaPlatform.DOUYIN:
                        transformed_note = _transform_douyin_item_to_search_note_data(
                            raw_item,
                            s_req.platform, # pass the raw platform string
                            default_point_cost=point_cost_for_item
                        )
                    elif current_platform == MediaPlatform.XIAOHONGSHU:
                        transformed_note = _transform_xhs_item_to_search_note_data(
                            raw_item, 
                            s_req.platform, # pass the raw platform string
                            default_point_cost=point_cost_for_item 
                        )
                    # ... (add elif branches and transform functions for other platforms here) ...
                    else:
                        logger.warning(f"search_note_by_kword_logic-平台 {s_req.platform} 的转换逻辑未实现", extra=log_extra)

                    if transformed_note:
                        processed_notes.append(transformed_note)
                        total_points_consumed += transformed_note.point_cost

            request_ctx.set_consumed_points(total_points_consumed)

            return request_context, processed_notes, log_extra

        except ValueError as ve: # Catch specific validation errors like "platform and query are required"
            logger.error(f"search_note_by_kword_logic 参数错误: {ve}", extra=log_extra)
            request_ctx.set_consumed_points(0)
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(ve))
        except HTTPException as http_exc: # Re-raise HTTPExceptions
            raise http_exc
        except Exception as e: # Catch other unexpected errors
            logger.error(f"search_note_by_kword_logic 发生未知错误: {e}", extra=log_extra, exc_info=True)
            request_ctx.set_consumed_points(0) # Or some error-specific point deduction
            # Ensure you return or raise an HTTP response, not a generic Exception
            raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"服务器内部错误: {e}")