import requests
import json
import sys
from typing import Dict, Any, Optional, List
from loguru import logger
from config import settings
import os
from urllib.parse import urlparse
import re
from pathlib import Path

# Work around Windows console encoding (cp936/gbk cannot print the emoji used
# in log messages below). `reconfigure` is safer than re-wrapping
# `sys.stdout.buffer`: it is idempotent and does not raise when stdout has
# already been replaced by a stream without a `.buffer` attribute
# (pythonw, IDEs, pytest capture, repeated imports of this module).
import io
if hasattr(sys.stdout, "reconfigure"):
    try:
        sys.stdout.reconfigure(encoding="utf-8")
    except (ValueError, OSError):
        # Best effort only: encoding setup must never crash the import.
        pass

class DifyAPIClient:
    """Minimal synchronous client for the Dify dataset (knowledge-base) API.

    Every method wraps a single ``requests`` call. Failures are logged and
    reported through the return value (``None`` / ``False`` / empty list)
    instead of being raised, so callers can treat each call as best-effort.
    """

    def __init__(self):
        # Base URL and API key come from the project-level settings module.
        self.base_url = settings.dify_base_url.rstrip('/')
        self.api_key = settings.dify_api_key
        # Default headers for JSON endpoints. NOTE: multipart file uploads
        # must NOT reuse these headers -- the explicit Content-Type would stop
        # `requests` from generating the multipart boundary.
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }
    
    def create_knowledge_base(self, name: str, description: str = "", 
                            indexing_technique: str = "high_quality",
                            embedding_model: str = "text-embedding-v3",
                            embedding_model_provider: str = "tongyi") -> Optional[str]:
        """
        Create an empty knowledge base (dataset).
        
        Args:
            name: Knowledge-base name.
            description: Optional description; a default is generated from name.
            indexing_technique: Indexing mode ("high_quality" or "economy").
            embedding_model: Embedding model name.
            embedding_model_provider: Model provider identifier.
            
        Returns:
            The new dataset ID, or None on failure.
        """
        url = f"{self.base_url}/datasets"
        
        data = {
            "name": name,
            "description": description or f"知识库: {name}",
            "indexing_technique": indexing_technique,
            "permission": "only_me",
            "embedding_model": embedding_model,
            "embedding_model_provider": embedding_model_provider,
            "retrieval_model": {
                "search_method": "semantic_search",
                "reranking_enable": False,
                "top_k": 3,
                "score_threshold_enabled": False,
                "score_threshold": 0.5
            }
        }
        
        try:
            logger.info(f"创建知识库: {name}")
            response = requests.post(url, headers=self.headers, json=data, timeout=30)
            
            if response.status_code == 200:
                result = response.json()
                dataset_id = result.get('id')
                logger.success(f"✅ 知识库创建成功: {name}, ID: {dataset_id}")
                return dataset_id
            else:
                error_msg = f"状态码: {response.status_code}, 错误: {response.text}"
                logger.error(f"❌ 知识库创建失败: {error_msg}")
                return None
                
        except requests.exceptions.RequestException as e:
            logger.error(f"❌ 网络请求失败: {str(e)}")
            return None
        except json.JSONDecodeError as e:
            logger.error(f"❌ JSON解析失败: {str(e)}")
            return None
        except Exception as e:
            logger.error(f"❌ 创建知识库失败: {str(e)}")
            return None
    
    def upload_document(self, file_path: str, dataset_id: str, user_id: str = "api-user") -> Optional[Dict[str, Any]]:
        """
        Upload a local document to the given knowledge base.
        
        Args:
            file_path: Path of the local document.
            dataset_id: Target knowledge-base ID.
            user_id: User identifier sent as X-User-ID; defaults to "api-user".
            
        Returns:
            The upload result payload, or None on failure.
        """
        try:
            if not os.path.exists(file_path):
                logger.error(f"❌ 文件不存在: {file_path}")
                return None
            
            file_name = os.path.basename(file_path)
            logger.info(f"上传文档: {file_name} -> 知识库 {dataset_id}")
            
            # Upload straight to the Dify server via the create-by-file endpoint.
            url = f"{self.base_url}/datasets/{dataset_id}/document/create-by-file"
            
            # Prepare the multipart file payload.
            with open(file_path, 'rb') as f:
                files = {
                    'file': (file_name, f, 'application/octet-stream')
                }
                
                # Auth-only headers: letting `requests` set the multipart
                # Content-Type (with boundary) itself.
                headers = {
                    "Authorization": f"Bearer {self.api_key}",
                    "X-User-ID": user_id
                }
                
                response = requests.post(url, headers=headers, files=files, timeout=120)
                
                # Log the raw response for debugging.
                logger.debug(f"响应状态码: {response.status_code}")
                logger.debug(f"响应内容: {response.text}")
                
                if response.status_code == 200:
                    try:
                        result = response.json()
                        document = result.get('document')
                        if document:
                            logger.success(f"✅ 文档上传成功: {file_name}")
                            return result
                        else:
                            logger.error(f"❌ 响应中没有文档信息: {result}")
                            return None
                    except Exception as e:
                        logger.error(f"❌ JSON解析失败: {str(e)}")
                        logger.error(f"响应内容: {response.text}")
                        return None
                else:
                    error_msg = f"状态码: {response.status_code}, 错误: {response.text}"
                    logger.error(f"❌ 文档上传失败: {error_msg}")
                    return None
                    
        except requests.exceptions.RequestException as e:
            logger.error(f"❌ 网络请求失败: {str(e)}")
            return None
        except Exception as e:
            logger.error(f"❌ 上传文档失败: {str(e)}")
            return None
    
    def create_document_by_file_url(self, dataset_id: str, name: str, file_url: str,
                                   indexing_technique: str = "high_quality") -> Optional[Dict[str, Any]]:
        """
        Create a document from a file download URL.
        
        Args:
            dataset_id: Knowledge-base ID.
            name: Document name.
            file_url: Download link of the file.
            indexing_technique: Indexing mode ("high_quality" or "economy").
            
        Returns:
            The document payload, or None on failure.
        """
        url = f"{self.base_url}/datasets/{dataset_id}/documents"
        
        data = {
            "name": name,
            "file_url": file_url,
            "indexing_technique": indexing_technique
        }
        
        try:
            logger.info(f"通过URL创建文档: {name} -> 知识库 {dataset_id}")
            response = requests.post(url, headers=self.headers, json=data, timeout=60)
            
            if response.status_code == 200:
                result = response.json()
                document = result.get('document')
                if document:
                    logger.success(f"✅ URL文档创建成功: {name}, ID: {document['id']}")
                    return result
                else:
                    logger.error(f"❌ URL文档创建失败: 响应中无文档信息")
                    return None
            else:
                error_msg = f"状态码: {response.status_code}, 错误: {response.text}"
                logger.error(f"❌ URL文档创建失败: {error_msg}")
                return None
                
        except requests.exceptions.RequestException as e:
            logger.error(f"❌ 网络请求失败: {str(e)}")
            return None
        except Exception as e:
            logger.error(f"❌ 通过URL创建文档失败: {str(e)}")
            return None
    
    def upload_documents_batch(self, file_paths: List[str], knowledge_base_name: str) -> Dict[str, Any]:
        """
        Create a knowledge base and upload a batch of documents into it.
        
        Args:
            file_paths: Document paths (local paths or http(s) URLs).
            knowledge_base_name: Name for the new knowledge base.
            
        Returns:
            A summary dict with status, dataset info and per-file results.
        """
        if not file_paths:
            logger.warning("⚠️ 没有文档需要处理")
            return {"status": "no_files", "message": "没有文档需要处理"}
        
        logger.info(f"开始批量处理 {len(file_paths)} 个文档到知识库: {knowledge_base_name}")
        
        # 1. Create the knowledge base.
        dataset_id = self.create_knowledge_base(knowledge_base_name)
        if not dataset_id:
            return {"status": "failed", "message": "知识库创建失败"}
        
        # 2. Split the inputs into local files and URLs.
        local_files = []
        url_files = []
        
        for file_path in file_paths:
            if file_path.startswith(('http://', 'https://')):
                url_files.append(file_path)
            else:
                local_files.append(file_path)
        
        # 3. Upload, tracking success/failure per original path.
        success_count = 0
        failed_count = 0
        uploaded_files = []
        failed_files = []
        
        # Local files: upload directly.
        for file_path in local_files:
            result = self.upload_document(file_path, dataset_id)
            if result:
                success_count += 1
                uploaded_files.append(file_path)
                logger.info(f"进度: {success_count + failed_count}/{len(file_paths)}")
            else:
                failed_count += 1
                failed_files.append(file_path)
        
        # URL files: download to a temp dir first, then upload the copy.
        for file_url in url_files:
            downloaded_file_path = self.download_file_from_url(file_url)
            if downloaded_file_path:
                result = self.upload_document(downloaded_file_path, dataset_id)
                if result:
                    success_count += 1
                    uploaded_files.append(file_url)  # record the original URL
                    logger.info(f"进度: {success_count + failed_count}/{len(file_paths)}")
                else:
                    failed_count += 1
                    failed_files.append(file_url)
                
                # Remove the temporary download (best effort).
                try:
                    os.remove(downloaded_file_path)
                except Exception as e:
                    logger.warning(f"无法删除临时文件 {downloaded_file_path}: {str(e)}")
            else:
                failed_count += 1
                failed_files.append(file_url)
        
        logger.info(f"📊 批量上传完成 - 成功: {success_count}, 失败: {failed_count}, 总计: {len(file_paths)}")
        
        return {
            "status": "completed",
            "dataset_id": dataset_id,
            "dataset_name": knowledge_base_name,
            "success": success_count,
            "failed": failed_count,
            "total": len(file_paths),
            "uploaded_files": uploaded_files,
            "failed_files": failed_files
        }
    
    def create_document_by_text(self, dataset_id: str, name: str, text: str,
                               indexing_technique: str = "high_quality",
                               doc_form: str = "text_model",
                               doc_language: str = "Chinese",
                               process_rule: Optional[Dict[str, Any]] = None) -> Optional[Dict[str, Any]]:
        """
        Create a document from raw text.
        
        Args:
            dataset_id: Knowledge-base ID.
            name: Document name.
            text: Document text content.
            indexing_technique: Indexing mode ("high_quality" or "economy").
            doc_form: Content form (text_model/qa_model/hierarchical_model).
            doc_language: Document language (only used in qa_model mode).
            process_rule: Optional text processing rule; defaults to automatic.
            
        Returns:
            The document payload, or None on failure.
        """
        url = f"{self.base_url}/datasets/{dataset_id}/document/create-by-text"
        
        data = {
            "name": name,
            "text": text,
            "indexing_technique": indexing_technique,
            "doc_form": doc_form,
            "doc_language": doc_language
        }
        
        # Attach the processing rule (automatic mode when none given).
        if process_rule:
            data["process_rule"] = process_rule
        else:
            data["process_rule"] = {"mode": "automatic"}
        
        try:
            logger.info(f"创建文本文档: {name} -> 知识库 {dataset_id}")
            response = requests.post(url, headers=self.headers, json=data, timeout=60)
            
            if response.status_code == 200:
                result = response.json()
                document = result.get('document')
                if document:
                    logger.success(f"✅ 文本文档创建成功: {name}, ID: {document['id']}")
                    return result
                else:
                    logger.error(f"❌ 响应中没有文档信息: {result}")
                    return None
            else:
                error_msg = f"状态码: {response.status_code}, 错误: {response.text}"
                logger.error(f"❌ 文本文档创建失败: {error_msg}")
                return None
                
        except requests.exceptions.RequestException as e:
            logger.error(f"❌ 网络请求失败: {str(e)}")
            return None
        except json.JSONDecodeError as e:
            logger.error(f"❌ JSON解析失败: {str(e)}")
            return None
        except Exception as e:
            logger.error(f"❌ 创建文本文档失败: {str(e)}")
            return None
    
    def get_document_status(self, dataset_id: str, document_id: str) -> Optional[Dict[str, Any]]:
        """
        Fetch a document's indexing status.
        
        Args:
            dataset_id: Knowledge-base ID.
            document_id: Document ID.
            
        Returns:
            The document status payload, or None on failure.
        """
        url = f"{self.base_url}/datasets/{dataset_id}/documents/{document_id}"
        
        try:
            logger.info(f"查询文档状态: {document_id}")
            response = requests.get(url, headers=self.headers, timeout=30)
            
            if response.status_code == 200:
                result = response.json()
                document = result.get('document')
                if document:
                    status = document.get('indexing_status')
                    display_status = document.get('display_status')
                    logger.info(f"文档状态: {status} ({display_status})")
                    return result
                else:
                    logger.error(f"❌ 响应中没有文档信息: {result}")
                    return None
            else:
                error_msg = f"状态码: {response.status_code}, 错误: {response.text}"
                logger.error(f"❌ 查询文档状态失败: {error_msg}")
                return None
                
        except requests.exceptions.RequestException as e:
            logger.error(f"❌ 网络请求失败: {str(e)}")
            return None
        except Exception as e:
            logger.error(f"❌ 查询文档状态失败: {str(e)}")
            return None
    
    def get_dataset_detail(self, dataset_id: str) -> Optional[Dict[str, Any]]:
        """
        Fetch the details of a knowledge base.
        
        Args:
            dataset_id: Knowledge-base ID.
            
        Returns:
            The dataset detail payload, or None on failure.
        """
        url = f"{self.base_url}/datasets/{dataset_id}"
        
        try:
            logger.info(f"获取知识库详情: {dataset_id}")
            response = requests.get(url, headers=self.headers, timeout=30)
            
            if response.status_code == 200:
                result = response.json()
                logger.success(f"✅ 获取知识库详情成功: {dataset_id}")
                return result
            else:
                error_msg = f"状态码: {response.status_code}, 错误: {response.text}"
                logger.error(f"❌ 获取知识库详情失败: {error_msg}")
                return None
                
        except requests.exceptions.RequestException as e:
            logger.error(f"❌ 网络请求失败: {str(e)}")
            return None
        except Exception as e:
            logger.error(f"❌ 获取知识库详情失败: {str(e)}")
            return None
    
    def get_datasets(self) -> List[Dict[str, Any]]:
        """
        List the knowledge bases.
        
        Returns:
            The list of datasets (first page); empty list on failure.
        """
        url = f"{self.base_url}/datasets"
        
        try:
            logger.info("获取知识库列表...")
            response = requests.get(url, headers=self.headers, timeout=30)
            
            if response.status_code == 200:
                result = response.json()
                datasets = result.get('data', [])
                logger.success(f"获取到 {len(datasets)} 个知识库")
                return datasets
            else:
                error_msg = f"状态码: {response.status_code}, 错误: {response.text}"
                logger.error(f"❌ 获取知识库列表失败: {error_msg}")
                return []
                
        except requests.exceptions.RequestException as e:
            logger.error(f"❌ 网络请求失败: {str(e)}")
            return []
        except Exception as e:
            logger.error(f"❌ 获取知识库列表失败: {str(e)}")
            return []
    
    def update_dataset(self, dataset_id: str, name: Optional[str] = None, indexing_technique: Optional[str] = None,
                      permission: Optional[str] = None, embedding_model_provider: Optional[str] = None,
                      embedding_model: Optional[str] = None, retrieval_model: Optional[Dict[str, Any]] = None,
                      partial_member_list: Optional[List[str]] = None) -> Optional[Dict[str, Any]]:
        """
        Update a knowledge base; only non-None arguments are sent.
        
        Args:
            dataset_id: Knowledge-base ID.
            name: New name.
            indexing_technique: Indexing mode (high_quality/economy).
            permission: Permission (only_me/all_team_members/partial_members).
            embedding_model_provider: Embedding model provider.
            embedding_model: Embedding model.
            retrieval_model: Retrieval configuration.
            partial_member_list: Member IDs (required when permission is
                partial_members).
            
        Returns:
            The updated dataset payload, or None on failure / no-op.
        """
        url = f"{self.base_url}/datasets/{dataset_id}"
        
        # Build the request body from non-None parameters only.
        data = {}
        
        if name is not None:
            data["name"] = name
        if indexing_technique is not None:
            data["indexing_technique"] = indexing_technique
        if permission is not None:
            data["permission"] = permission
        if embedding_model_provider is not None:
            data["embedding_model_provider"] = embedding_model_provider
        if embedding_model is not None:
            data["embedding_model"] = embedding_model
        if retrieval_model is not None:
            data["retrieval_model"] = retrieval_model
        if partial_member_list is not None:
            data["partial_member_list"] = partial_member_list
        
        # Nothing to update: bail out without a network round trip.
        if not data:
            logger.warning("没有提供任何要更新的参数")
            return None
        
        try:
            logger.info(f"更新知识库: {dataset_id}")
            logger.debug(f"更新数据: {data}")
            
            response = requests.patch(url, headers=self.headers, json=data, timeout=60)
            
            if response.status_code == 200:
                result = response.json()
                logger.success(f"✅ 知识库更新成功: {dataset_id}")
                return result
            else:
                error_msg = f"状态码: {response.status_code}, 错误: {response.text}"
                logger.error(f"❌ 知识库更新失败: {error_msg}")
                return None
                
        except requests.exceptions.RequestException as e:
            logger.error(f"❌ 网络请求失败: {str(e)}")
            return None
        except json.JSONDecodeError as e:
            logger.error(f"❌ JSON解析失败: {str(e)}")
            return None
        except Exception as e:
            logger.error(f"❌ 知识库更新失败: {str(e)}")
            return None

    def delete_dataset(self, dataset_id: str) -> bool:
        """
        Delete a knowledge base.
        
        Args:
            dataset_id: Knowledge-base ID.
            
        Returns:
            True on success, False on failure.
        """
        url = f"{self.base_url}/datasets/{dataset_id}"
        
        try:
            logger.info(f"删除知识库: {dataset_id}")
            response = requests.delete(url, headers=self.headers, timeout=30)
            
            # 204 No Content is the canonical success response for DELETE;
            # some API implementations return 200, which we also accept.
            if response.status_code in [200, 204]:
                logger.success(f"✅ 知识库删除成功: {dataset_id}")
                return True
            else:
                error_msg = f"状态码: {response.status_code}, 错误: {response.text}"
                logger.error(f"❌ 知识库删除失败: {error_msg}")
                return False
                
        except requests.exceptions.RequestException as e:
            logger.error(f"❌ 网络请求失败: {str(e)}")
            return False
        except Exception as e:
            logger.error(f"❌ 知识库删除失败: {str(e)}")
            return False
    
    def get_documents(self, dataset_id: str) -> List[Dict[str, Any]]:
        """
        List the documents inside a knowledge base.
        
        Args:
            dataset_id: Knowledge-base ID.
            
        Returns:
            The document list; empty list on failure.
        """
        url = f"{self.base_url}/datasets/{dataset_id}/documents"
        
        try:
            logger.info(f"获取知识库文档列表: {dataset_id}")
            response = requests.get(url, headers=self.headers, timeout=30)
            
            if response.status_code == 200:
                result = response.json()
                documents = result.get('data', [])
                logger.success(f"✅ 获取文档列表成功: {len(documents)} 个文档")
                return documents
            else:
                error_msg = f"状态码: {response.status_code}, 错误: {response.text}"
                logger.error(f"❌ 获取文档列表失败: {error_msg}")
                return []
                
        except requests.exceptions.RequestException as e:
            logger.error(f"❌ 网络请求失败: {str(e)}")
            return []
        except Exception as e:
            logger.error(f"❌ 获取文档列表失败: {str(e)}")
            return []
    
    def delete_document(self, dataset_id: str, document_id: str) -> bool:
        """
        Delete one document from a knowledge base.
        
        Args:
            dataset_id: Knowledge-base ID.
            document_id: Document ID.
            
        Returns:
            True on success, False on failure.
        """
        url = f"{self.base_url}/datasets/{dataset_id}/documents/{document_id}"
        
        try:
            logger.info(f"删除文档: {document_id} (知识库: {dataset_id})")
            response = requests.delete(url, headers=self.headers, timeout=30)
            
            if response.status_code in [200, 204]:
                logger.success(f"✅ 文档删除成功: {document_id}")
                return True
            else:
                error_msg = f"状态码: {response.status_code}, 错误: {response.text}"
                logger.error(f"❌ 文档删除失败: {error_msg}")
                return False
                
        except requests.exceptions.RequestException as e:
            logger.error(f"❌ 网络请求失败: {str(e)}")
            return False
        except Exception as e:
            logger.error(f"❌ 文档删除失败: {str(e)}")
            return False
    
    def update_document_by_file(self, dataset_id: str, document_id: str, file_path: str, 
                               name: Optional[str] = None,
                               process_rule: Optional[Dict[str, Any]] = None) -> Optional[Dict[str, Any]]:
        """
        Replace a document's content with a local file.
        
        Args:
            dataset_id: Knowledge-base ID.
            document_id: Document ID to update.
            file_path: Path of the replacement file.
            name: Optional new document name.
            process_rule: Optional processing rule forwarded to Dify.
            
        Returns:
            The updated document payload, or None on failure.
        """
        url = f"{self.base_url}/datasets/{dataset_id}/documents/{document_id}/update-by-file"
        
        try:
            # Verify the file exists before opening the request.
            if not os.path.exists(file_path):
                logger.error(f"❌ 文件不存在: {file_path}")
                return None
            
            # Multipart upload: send only the auth header. Reusing self.headers
            # would pin Content-Type to application/json and prevent `requests`
            # from emitting the multipart boundary (same pattern as
            # upload_document above).
            headers = {"Authorization": f"Bearer {self.api_key}"}
            
            # Extra form fields.
            data = {}
            if name:
                data['name'] = name
            if process_rule:
                data['data'] = json.dumps({"process_rule": process_rule})
            
            logger.info(f"通过文件更新文档: {document_id} (文件: {file_path})")
            
            # `with` guarantees the handle is closed even when the request
            # raises (the previous implementation leaked it on network errors).
            with open(file_path, 'rb') as f:
                files = {'file': f}
                # Send the multipart/form-data request.
                response = requests.post(url, headers=headers, files=files, data=data, timeout=60)
            
            if response.status_code == 200:
                result = response.json()
                logger.success(f"✅ 文档更新成功: {document_id}")
                return result
            else:
                error_msg = f"状态码: {response.status_code}, 错误: {response.text}"
                logger.error(f"❌ 文档更新失败: {error_msg}")
                return None
                
        except requests.exceptions.RequestException as e:
            logger.error(f"❌ 网络请求失败: {str(e)}")
            return None
        except Exception as e:
            logger.error(f"❌ 文档更新失败: {str(e)}")
            return None

    def download_file_from_url(self, file_url: str, save_directory: str = "temp", headers: Optional[Dict[str, str]] = None) -> Optional[str]:
        """
        Download a file from a URL and save it locally.
        
        Args:
            file_url: Download link of the file.
            save_directory: Target directory, "temp" by default.
            headers: Optional HTTP headers for the download (e.g. an
                Authorization header for protected links).
        
        Returns:
            The local path of the downloaded file, or None on failure.
        
        NOTE(review): when `headers` is not given this falls back to
        `self.headers`, which sends the Dify API key to whatever host the URL
        points at -- confirm that is intended for third-party URLs.
        """
        try:
            # Make sure the target directory exists.
            Path(save_directory).mkdir(parents=True, exist_ok=True)
            
            # Normalize the URL and repair common mistakes
            # (e.g. "https//..." missing the colon).
            original_url = (file_url or "").strip()
            normalized_url = original_url
            try:
                # Fix "https//" or "http//" missing the colon.
                normalized_url = re.sub(r"^(https?)(//)", r"\1://", normalized_url, flags=re.IGNORECASE)
                # Fix stray whitespace such as "https: //example.com".
                normalized_url = re.sub(r"^(https?):\s*//", r"\1://", normalized_url, flags=re.IGNORECASE)
                # No scheme at all but starts with www.: assume https.
                if not re.match(r"^[a-zA-Z][a-zA-Z0-9+.-]*://", normalized_url):
                    if normalized_url.lower().startswith("www."):
                        normalized_url = "https://" + normalized_url
                # Log when a correction was applied.
                if normalized_url != original_url:
                    logger.warning(f"检测到不规范URL，已纠正: {original_url} -> {normalized_url}")
            except Exception:
                # Normalization must never block the download; fall back to
                # the original URL.
                normalized_url = original_url

            logger.info(f"开始下载文件: {normalized_url}")
            response = requests.get(normalized_url, timeout=60, headers=headers or self.headers)
            response.raise_for_status()  # abort on HTTP error status

            content_type = response.headers.get('content-type', '').lower()

            def _extract_filename_from_content_disposition(cd: str) -> Optional[str]:
                # Supports both `filename=` and the RFC 5987 `filename*=` form.
                try:
                    if not cd:
                        return None
                    if 'filename*=' in cd:
                        fname = cd.split('filename*=')[-1]
                        # May carry an encoding prefix such as UTF-8''document.pdf
                        if "''" in fname:
                            fname = fname.split("''", 1)[-1]
                        return fname.strip().strip('"')
                    if 'filename=' in cd:
                        fname = cd.split('filename=')[-1]
                        return fname.strip().strip('"')
                except Exception:
                    return None
                return None

            def _choose_filename(url_for_name: str, fallback_ct: str, resp_headers: Dict[str, Any]) -> str:
                # Prefer the URL path, then Content-Disposition, then a
                # content-type-based default name.
                parsed = urlparse(url_for_name)
                name = os.path.basename(parsed.path) or ''
                if not name:
                    name = _extract_filename_from_content_disposition(resp_headers.get('content-disposition', '')) or ''
                if not name:
                    if 'pdf' in fallback_ct:
                        name = 'document.pdf'
                    elif 'word' in fallback_ct or 'docx' in fallback_ct:
                        name = 'document.docx'
                    elif 'excel' in fallback_ct or 'spreadsheet' in fallback_ct:
                        name = 'document.xlsx'
                    elif 'text' in fallback_ct:
                        name = 'document.txt'
                    else:
                        name = 'downloaded_file'
                return name

            # A JSON response usually means the URL points at upload-widget
            # metadata (a file list) rather than the file itself; try to
            # follow the real download link inside it.
            is_json = 'application/json' in content_type
            actual_binary_response = response
            actual_url_for_name = normalized_url

            if is_json:
                try:
                    meta = response.json()
                    candidate_url = None

                    def _find_url_in_meta(m):
                        # Handles the common shapes:
                        # [{ url, fullPath, response: { url } }] or { url, fullPath }
                        if isinstance(m, dict):
                            url_val = m.get('url') or m.get('fullPath') or (
                                m.get('response', {}) if isinstance(m.get('response'), dict) else {}
                            )
                            if isinstance(url_val, dict):
                                return url_val.get('url')
                            return url_val
                        return None

                    if isinstance(meta, list) and meta:
                        candidate_url = _find_url_in_meta(meta[0])
                    elif isinstance(meta, dict):
                        candidate_url = _find_url_in_meta(meta)

                    if candidate_url:
                        # Relative path (e.g. /files/...): join with the host.
                        base = urlparse(normalized_url)
                        from urllib.parse import urljoin
                        actual_download_url = urljoin(f"{base.scheme}://{base.netloc}", str(candidate_url))

                        logger.info(f"检测到JSON元数据，尝试跟进真实文件链接: {actual_download_url}")
                        actual_binary_response = requests.get(actual_download_url, timeout=60, headers=headers or self.headers)
                        actual_binary_response.raise_for_status()
                        actual_url_for_name = actual_download_url
                    else:
                        raise Exception("提供的URL返回JSON元数据，未发现可用的文件下载链接字段（url/fullPath）。")
                except json.JSONDecodeError:
                    # Declared JSON but not parseable -- treat as binary.
                    pass
                except Exception as e:
                    logger.error(f"❌ JSON元数据处理失败: {str(e)}")
                    raise

            # Decide the local file name.
            # NOTE(review): repeated downloads with the same name overwrite
            # each other in save_directory -- confirm callers accept that.
            file_name = _choose_filename(actual_url_for_name, actual_binary_response.headers.get('content-type', '').lower(), actual_binary_response.headers)
            file_path = os.path.join(save_directory, file_name)

            # Persist the payload.
            with open(file_path, 'wb') as f:
                f.write(actual_binary_response.content)

            logger.success(f"✅ 文件下载成功: {file_path}")
            return file_path
            
        except Exception as e:
            logger.error(f"❌ 文件下载失败: {str(e)}")
            return None