import os
import time
from http import HTTPStatus
from urllib.parse import urlparse, urlunparse, quote, unquote
from fastapi import APIRouter, Body, Request, Depends, Form
from typing import Optional
from sqlalchemy.orm import Session
from loguru import logger
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_bailian20231229.client import Client as BailianClient
from alibabacloud_bailian20231229 import models as bailian_models
from alibabacloud_credentials.models import Config as CredentialConfig
from alibabacloud_credentials.client import Client as CredentialClient

from configs.other_config import ACCESS_KEY_ID, ACCESS_KEY_SECRET, WORKSPACE_ID, ALGORITHM
from configs.kb_config import GO_SERVICE_SECRET_KEY
from configs.logging_config import configure_logging
from server.db.models.pet_kb_model import PetKnowledgeBase, PetKnowledgeFile
from server.db.session import get_db
from server.utils import BaseResponse, decode_verify_token

configure_logging()
router = APIRouter()

# File-upload limit constants
MAX_FILE_SIZE = 10 * 1024 * 1024  # 10MB, in bytes
MAX_DOCUMENTS_PER_USER = 10  # each user may upload at most 10 documents


class BailianKnowledgeBaseClient:
    """Client for the Alibaba Cloud Bailian knowledge-base (index) API."""

    def __init__(
        self,
        access_key_id: str,
        access_key_secret: str,
        workspace_id: str,
        endpoint: str = "bailian.cn-beijing.aliyuncs.com"
    ):
        """
        Initialize the Bailian knowledge-base client.

        :param access_key_id: Alibaba Cloud access key ID
        :param access_key_secret: Alibaba Cloud access key secret
        :param workspace_id: Bailian workspace ID all calls are scoped to
        :param endpoint: service endpoint host
        """
        self.workspace_id = workspace_id
        self.endpoint = endpoint
        self._init_client(access_key_id, access_key_secret)
    
    def _init_client(self, access_key_id: str, access_key_secret: str):
        """Build the underlying Bailian SDK client from an access-key credential."""
        self.client = BailianClient(
            open_api_models.Config(
                credential=CredentialClient(
                    CredentialConfig(
                        type="access_key",
                        access_key_id=access_key_id,
                        access_key_secret=access_key_secret
                    )
                ),
                endpoint=self.endpoint
            )
        )
    
    @staticmethod
    def _get_field(obj, field_name, default=None):
        """
        兼容多种对象类型获取字段值
        支持：字典、类字典对象、对象属性
        
        注意：优先尝试字典和类字典访问，因为某些对象可能同时支持属性和字典访问，
        但属性访问可能返回错误的值
        """
        # 生成候选字段名（大小写/风格差异）
        candidates = [field_name, field_name.lower(), field_name.upper(), field_name.capitalize()]
        alias_map = {
            'Code': ['Code', 'code'],
            'Success': ['Success', 'success'],
            'Status': ['Status', 'status'],
            'Message': ['Message', 'message'],
            'RequestId': ['RequestId', 'requestId', 'request_id'],
            'Data': ['Data', 'data'],
            'Id': ['Id', 'id']
        }
        if field_name in alias_map:
            for a in alias_map[field_name]:
                if a not in candidates:
                    candidates.append(a)

        # 优先尝试字典访问（最常见的情况）
        if isinstance(obj, dict):
            for key in candidates:
                if key in obj:
                    return obj.get(key, default)
        
        # 尝试类字典对象（支持 [] 访问）
        if hasattr(obj, '__getitem__'):
            try:
                for key in candidates:
                    try:
                        return obj[key]
                    except Exception:
                        continue
            except (KeyError, TypeError, IndexError):
                pass
        
        # 最后尝试对象属性访问
        for key in candidates:
            if hasattr(obj, key):
                val = getattr(obj, key, default)
                # 排除方法
                if not callable(val):
                    return val

        # 尝试模型对象提供的 to_map / to_dict / __dict__
        for conv in ('to_map', 'to_dict'):
            if hasattr(obj, conv):
                try:
                    mapping = getattr(obj, conv)()
                    if isinstance(mapping, dict):
                        for key in candidates:
                            if key in mapping:
                                return mapping.get(key, default)
                except Exception:
                    pass
        if hasattr(obj, '__dict__') and isinstance(obj.__dict__, dict):
            d = obj.__dict__
            for key in candidates:
                if key in d:
                    return d.get(key, default)
        
        return default

    @staticmethod
    def _as_bool(value):
        if isinstance(value, bool):
            return value
        if isinstance(value, str):
            return value.strip().lower() in ('true', '1', 'yes', 'y')
        if isinstance(value, (int, float)):
            return value != 0
        return False

    @staticmethod
    def _as_int(value):
        try:
            return int(value)
        except Exception:
            return None

    @classmethod
    def _is_success(cls, body) -> bool:
        """Return True iff the response body's Success flag is truthy."""
        flag = cls._get_field(body, 'Success')
        return cls._as_bool(flag)

    @classmethod
    def _should_retry(cls, body) -> bool:
        """
        Decide whether a response body indicates a transient throttling /
        overload condition that warrants a retry.

        :param body: API response body (dict or SDK model object)
        :return: True when Code is a known throttling code or Status is 429/503
        """
        code = cls._get_field(body, 'Code')
        # Status may be delivered as a string (e.g. "429") depending on the
        # response shape, so normalize it to int before comparing — otherwise
        # '429' in (429, 503) is False and throttled calls are never retried.
        status = cls._as_int(cls._get_field(body, 'Status'))
        throttling_codes = {
            'TooManyRequests', 'Throttling', 'ThrottlingException', 'QuotaExceeded', 'RateLimited'
        }
        return (code in throttling_codes) or (status in (429, 503))

    def _request_with_retry(self, call, retries: int = 3, backoff_seconds: float = 0.5):
        """
        Invoke an API call, retrying with exponential backoff on throttling /
        transient errors.

        :param call: zero-argument callable performing the SDK request
        :param retries: maximum number of attempts
        :param backoff_seconds: base delay; attempt i waits backoff * 2**i
        :return: the last response obtained (or the first non-retryable one)
        :raises: re-raises the call's exception when the final attempt fails
        """
        last_resp = None
        for attempt in range(retries):
            is_last = attempt == retries - 1
            try:
                resp = call()
                last_resp = resp
                body = getattr(resp, 'body', None)
                # No body, or a body that doesn't signal throttling: done.
                if body is None or not self._should_retry(body):
                    return resp
                if is_last:
                    # Out of attempts — skip the pointless final backoff sleep.
                    break
                wait = backoff_seconds * (2 ** attempt)
                logger.warning(f"调用触发限流/瞬时错误，{wait:.2f}s 后重试（第 {attempt + 1}/{retries} 次） | Body: {body}")
                time.sleep(wait)
            except Exception as e:
                # Only the final attempt's exception propagates to the caller.
                if is_last:
                    raise
                wait = backoff_seconds * (2 ** attempt)
                logger.warning(f"调用异常：{e}，{wait:.2f}s 后重试（第 {attempt + 1}/{retries} 次）")
                time.sleep(wait)
        return last_resp
    
    def create_knowledge_base(
        self, 
        name: str, 
        description: Optional[str] = None
    ) -> dict:
        """
        Create a knowledge base (index) in the Bailian workspace.

        :param name: knowledge-base name
        :param description: optional description; a default derived from name is used when omitted
        :return: dict with "success"; on success also "knowledge_base_id"/"request_id",
                 otherwise "error" (and possibly "error_code")
        """
        try:
            # Step 1: create the knowledge-base index.
            create_req = bailian_models.CreateIndexRequest(
                name=name,
                description=description or f"宠物专属知识库：{name}",
                structure_type="unstructured",  # unstructured document-search knowledge base
                sink_type="BUILT_IN",  # vector data hosted on the Bailian platform
                embedding_model_name="text-embedding-v4",  # V4 embedding model
                rerank_model_name="gte-rerank-hybrid"  # official rerank model
            )
            
            resp = self._request_with_retry(lambda: self.client.create_index(self.workspace_id, create_req))
            
            # Log the full response for debugging.
            logger.info(f"CreateIndex API 响应 - HTTP状态码: {resp.status_code}, Body: {resp.body}")
            
            # Only the Success flag decides the outcome.
            success = self._as_bool(self._get_field(resp.body, 'Success'))
            logger.info(f"CreateIndex 解析字段 -> Success: {success}, Code: {self._get_field(resp.body, 'Code')}, Status: {self._get_field(resp.body, 'Status')}")
            if not success:
                error_msg = self._get_field(resp.body, 'Message', '未知错误')
                
                # Special case: duplicate-name errors get a friendlier message,
                # since they are usually leftovers of an earlier failed create.
                code = self._get_field(resp.body, 'Code')
                if code == 'Index.IndexNameAlreadyExists':
                    logger.error(f"创建知识库失败 - 名称已存在: {name}")
                    return {
                        "success": False,
                        "error": f"知识库名称已存在，这可能是之前创建失败遗留的。错误信息: {error_msg}",
                        "error_code": code
                    }
                
                logger.error(f"创建知识库失败 - 错误码: {code}, 消息: {error_msg}")
                return {
                    "success": False,
                    "error": f"{code}: {error_msg}",
                    "error_code": code
                }
            
            # Extract the response payload.
            data = self._get_field(resp.body, 'Data')
            
            if not data:
                error_msg = f"API响应缺少Data字段 - Body: {resp.body}"
                logger.error(error_msg)
                return {
                    "success": False,
                    "error": error_msg
                }
            
            # Extract the new knowledge base's ID.
            knowledge_base_id = self._get_field(data, 'Id')
            
            if not knowledge_base_id:
                error_msg = f"API响应Data中缺少Id字段 - Data: {data}"
                logger.error(error_msg)
                return {
                    "success": False,
                    "error": error_msg
                }
            
            logger.info(f"知识库创建成功，ID: {knowledge_base_id}")
            
            # Note: SubmitIndexJob is NOT called here on purpose —
            # it should be called after files are added, to trigger indexing.
            return {
                "success": True,
                "knowledge_base_id": knowledge_base_id,
                "request_id": self._get_field(resp.body, 'RequestId', '')
            }
            
        except Exception as e:
            logger.error(f"创建知识库异常: {e}", exc_info=True)
            return {
                "success": False,
                "error": str(e)
            }
    
    def add_file_by_url(
        self, 
        knowledge_base_id: str, 
        file_name: str, 
        file_url: str
    ) -> dict:
        """
        Add a file to the knowledge base via URL (full flow: apply upload
        lease -> upload file -> add to category -> submit index-append job).

        :param knowledge_base_id: knowledge-base ID
        :param file_name: file name
        :param file_url: publicly accessible URL of the file
        :return: dict with "success"; on success also "file_id"/"job_id"/"request_id"
        """
        try:
            import requests
            import hashlib
            
            # Step 1: download the file to obtain its size and MD5.
            # Re-encode the URL so non-ASCII path characters (e.g. Chinese)
            # survive transport, while avoiding double percent-encoding.
            try:
                parsed_url = urlparse(file_url)
                # Only the path is touched; query string and fragment are kept.
                # Decode first, then re-encode, so an existing %XX never
                # becomes %25XX.
                normalized_path = unquote(parsed_url.path)
                encoded_path = quote(normalized_path, safe='/')
                encoded_url = urlunparse((
                    parsed_url.scheme,
                    parsed_url.netloc,
                    encoded_path,
                    parsed_url.params,
                    parsed_url.query,
                    parsed_url.fragment
                ))
                logger.info(f"原始URL: {file_url}")
                logger.info(f"编码后URL: {encoded_url}")
            except Exception as e:
                logger.warning(f"URL编码失败，使用原始URL: {e}")
                encoded_url = file_url
            
            logger.info(f"开始下载文件: {encoded_url}")
            response = requests.get(encoded_url, timeout=60, allow_redirects=True)
            if response.status_code != 200:
                logger.error(f"文件下载失败 - URL: {encoded_url}, HTTP状态码: {response.status_code}, 响应内容: {response.text[:200]}")
                return {
                    "success": False,
                    "error": f"无法下载文件，HTTP状态码: {response.status_code}。请检查文件URL是否正确且可公开访问。"
                }
            
            file_content = response.content
            file_size = len(file_content)
            
            # Step 1.5: for .txt files, detect the encoding and convert to UTF-8.
            if file_name.lower().endswith('.txt'):
                try:
                    import chardet
                    # Detect the file's character encoding.
                    detected = chardet.detect(file_content)
                    detected_encoding = detected.get("encoding", "utf-8")
                    confidence = detected.get("confidence", 0)
                    
                    logger.info(f"检测到文件编码: {detected_encoding} (置信度: {confidence:.2f})")
                    
                    # Convert only when the detected encoding isn't already UTF-8/ASCII.
                    if detected_encoding and detected_encoding.lower() not in ['utf-8', 'ascii']:
                        try:
                            # Decode using the detected encoding...
                            text_content = file_content.decode(detected_encoding, errors='ignore')
                            # ...and re-encode as UTF-8.
                            file_content = text_content.encode('utf-8')
                            file_size = len(file_content)
                            logger.info(f"文件编码已从 {detected_encoding} 转换为 UTF-8，新大小: {file_size} bytes")
                        except (UnicodeDecodeError, LookupError) as e:
                            logger.warning(f"编码转换失败，使用原始内容: {e}")
                            # Fall back to a lossy UTF-8 round-trip (errors ignored).
                            try:
                                text_content = file_content.decode('utf-8', errors='ignore')
                                file_content = text_content.encode('utf-8')
                                file_size = len(file_content)
                                logger.info(f"使用 UTF-8 重新编码，新大小: {file_size} bytes")
                            except Exception as e2:
                                logger.warning(f"UTF-8 重新编码也失败，保持原始内容: {e2}")
                    else:
                        logger.info(f"文件已经是 UTF-8 编码，无需转换")
                except ImportError:
                    logger.warning("chardet 库未安装，跳过编码检测。请安装: pip install chardet")
                except Exception as e:
                    logger.warning(f"编码检测/转换过程出错，使用原始内容: {e}")
            
            # MD5 is computed on the (possibly re-encoded) final content,
            # matching what gets uploaded below.
            file_md5 = hashlib.md5(file_content).hexdigest()
            logger.info(f"文件下载成功 - 大小: {file_size} bytes, MD5: {file_md5}")
            
            # Enforce the file-size limit (10MB) on the final content.
            if file_size > MAX_FILE_SIZE:
                file_size_mb = file_size / (1024 * 1024)
                max_size_mb = MAX_FILE_SIZE / (1024 * 1024)
                logger.warning(f"文件大小超出限制 - 文件大小: {file_size_mb:.2f}MB, 最大限制: {max_size_mb}MB")
                return {
                    "success": False,
                    "error": f"文件大小超出限制，当前文件大小: {file_size_mb:.2f}MB，最大允许: {max_size_mb}MB"
                }
            
            # Step 2: apply for a file-upload lease.
            logger.info("开始申请文件上传租约...")
            lease_req = bailian_models.ApplyFileUploadLeaseRequest(
                file_name=file_name,
                md_5=file_md5,
                size_in_bytes=str(file_size)
            )
            
            lease_resp = self._request_with_retry(lambda: self.client.apply_file_upload_lease(
                "default",  # category_id
                self.workspace_id,
                lease_req
            ))
            
            logger.info(f"ApplyFileUploadLease API 响应 - HTTP状态码: {lease_resp.status_code}, Body: {lease_resp.body}")
            
            if not self._is_success(lease_resp.body):
                error_msg = self._get_field(lease_resp.body, 'Message', '未知错误')
                code = self._get_field(lease_resp.body, 'Code')
                logger.error(f"申请租约失败 - 错误码: {code}, 消息: {error_msg}")
                return {
                    "success": False,
                    "error": f"申请租约失败: {code}: {error_msg}"
                }
            
            # Extract the lease payload.
            lease_data = self._get_field(lease_resp.body, 'Data')
            if not lease_data:
                return {
                    "success": False,
                    "error": "申请租约响应缺少Data字段"
                }
            
            file_upload_lease_id = self._get_field(lease_data, 'FileUploadLeaseId')
            param = self._get_field(lease_data, 'Param')
            
            if not file_upload_lease_id or not param:
                return {
                    "success": False,
                    "error": "租约响应缺少必要字段"
                }
            
            upload_url = self._get_field(param, 'Url')
            upload_method = self._get_field(param, 'Method', 'PUT')
            headers_data = self._get_field(param, 'Headers', {})
            
            logger.info(f"租约申请成功 - LeaseId: {file_upload_lease_id}")
            
            # Step 3: upload the file to temporary storage via the leased URL.
            logger.info(f"开始上传文件到临时存储: {upload_url}")
            upload_headers = {
                "X-bailian-extra": self._get_field(headers_data, 'X-bailian-extra', ''),
                "Content-Type": self._get_field(headers_data, 'Content-Type', 'application/octet-stream')
            }
            
            upload_response = requests.request(
                method=upload_method,
                url=upload_url,
                data=file_content,
                headers=upload_headers,
                timeout=300
            )
            
            # NOTE(review): only HTTP 200 is treated as success — confirm the
            # leased upload endpoint never answers with another 2xx code.
            if upload_response.status_code != 200:
                logger.error(f"文件上传失败 - HTTP状态码: {upload_response.status_code}")
                return {
                    "success": False,
                    "error": f"文件上传失败，HTTP状态码: {upload_response.status_code}"
                }
            
            logger.info("文件上传成功")
            
            # Step 4: register the uploaded file under the knowledge-base category.
            logger.info("开始添加文件到知识库...")
            add_file_req = bailian_models.AddFileRequest(
                lease_id=file_upload_lease_id,  # lease ID from step 2
                parser="DASHSCOPE_DOCMIND",
                category_id="default"
            )
            add_file_req.index_id = knowledge_base_id
            
            add_resp = self._request_with_retry(lambda: self.client.add_file(
                self.workspace_id,
                add_file_req
            ))
            
            logger.info(f"AddFile API 响应 - HTTP状态码: {add_resp.status_code}, Body: {add_resp.body}")
            
            if not self._is_success(add_resp.body):
                error_msg = self._get_field(add_resp.body, 'Message', '未知错误')
                code = self._get_field(add_resp.body, 'Code')
                logger.error(f"添加文件失败 - 错误码: {code}, 消息: {error_msg}")
                return {
                    "success": False,
                    "error": f"{code}: {error_msg}"
                }
            
            # Extract the new file's ID.
            data = self._get_field(add_resp.body, 'Data')
            if not data:
                return {
                    "success": False,
                    "error": "AddFile响应缺少Data字段"
                }
            
            # AddFile returns the ID under 'FileId', not 'Id'.
            file_id = self._get_field(data, 'FileId')
            request_id = self._get_field(add_resp.body, 'RequestId', '')
            
            logger.info(f"文件添加成功 - FileId: {file_id}")
            
            # Step 5: submit an index-append job so the new file gets indexed.
            logger.info(f"开始将文件追加到知识库 {knowledge_base_id}...")
            add_docs_req = bailian_models.SubmitIndexAddDocumentsJobRequest(
                index_id=knowledge_base_id,
                document_ids=[file_id],
                source_type="DATA_CENTER_FILE"
            )
            
            add_docs_resp = self._request_with_retry(lambda: self.client.submit_index_add_documents_job(
                self.workspace_id,
                add_docs_req
            ))
            
            logger.info(f"SubmitIndexAddDocumentsJob API 响应 - HTTP状态码: {add_docs_resp.status_code}, Body: {add_docs_resp.body}")
            
            if not self._is_success(add_docs_resp.body):
                error_msg = self._get_field(add_docs_resp.body, 'Message', '未知错误')
                code = self._get_field(add_docs_resp.body, 'Code')
                logger.error(f"追加文件到知识库失败 - 错误码: {code}, 消息: {error_msg}")
                return {
                    "success": False,
                    "error": f"追加文件到知识库失败: {code}: {error_msg}"
                }
            
            # Extract the append job's ID (used for later status polling).
            add_docs_data = self._get_field(add_docs_resp.body, 'Data')
            job_id = self._get_field(add_docs_data, 'Id') if add_docs_data else None
            
            logger.info(f"文件追加任务提交成功 - JobId: {job_id}")
            
            return {
                "success": True,
                "file_id": file_id,
                "job_id": job_id,
                "request_id": request_id
            }
            
        except requests.exceptions.RequestException as e:
            logger.error(f"网络请求异常: {e}", exc_info=True)
            return {
                "success": False,
                "error": f"网络请求失败: {str(e)}"
            }
        except Exception as e:
            logger.error(f"添加文件异常: {e}", exc_info=True)
            return {
                "success": False,
                "error": str(e)
            }
    
    def describe_file(
        self, 
        file_id: str,
        index_id: str
    ) -> dict:
        """
        Query a file's parsing status.

        :param file_id: file ID
        :param index_id: knowledge-base ID (accepted for interface parity but
            unused below — the DescribeFile call only needs workspace + file ID)
        :return: dict with "success"; on success also "status"/"file_name"/"request_id"
        """
        try:
            # Per the SDK docs the call takes positional arguments:
            # client.describe_file(workspace_id, file_id)
            resp = self._request_with_retry(lambda: self.client.describe_file(
                self.workspace_id,
                file_id
            ))
            
            # This endpoint may signal success through Code, Success, or an
            # HTTP-like numeric Status, so all three are checked below.
            code = self._get_field(resp.body, 'Code')
            success = self._as_bool(self._get_field(resp.body, 'Success'))
            status = self._get_field(resp.body, 'Status')
            logger.info(f"GetFile 解析字段 -> Success: {success}, Code: {code}, Status: {status}")
            
            if (code == 'Success') or success or (status == 200):
                data = self._get_field(resp.body, 'Data')
                
                file_status = self._get_field(data, 'Status')
                file_name = self._get_field(data, 'Name')
                request_id = self._get_field(resp.body, 'RequestId', '')
                
                return {
                    "success": True,
                    "status": file_status,
                    "file_name": file_name,
                    "request_id": request_id
                }
            else:
                error_msg = self._get_field(resp.body, 'Message', '未知错误')
                logger.error(f"查询文件状态失败 - 错误码: {code}, 消息: {error_msg}")
                return {
                    "success": False,
                    "error": f"{code}: {error_msg}"
                }
        except Exception as e:
            logger.error(f"查询文件状态异常: {e}", exc_info=True)
            return {
                "success": False,
                "error": str(e)
            }
    
    def get_index_job_status(
        self,
        index_id: str,
        job_id: str
    ) -> dict:
        """
        Query the status of an index job (e.g. a document-append job).

        :param index_id: knowledge-base ID
        :param job_id: job ID
        :return: dict with "success"; on success also "status"/"documents"/"request_id"
        """
        try:
            get_job_req = bailian_models.GetIndexJobStatusRequest(
                index_id=index_id,
                job_id=job_id
            )
            
            resp = self._request_with_retry(lambda: self.client.get_index_job_status(
                self.workspace_id,
                get_job_req
            ))
            
            logger.info(f"GetIndexJobStatus API 响应 - HTTP状态码: {resp.status_code}, Body: {resp.body}")
            
            if not self._is_success(resp.body):
                error_msg = self._get_field(resp.body, 'Message', '未知错误')
                code = self._get_field(resp.body, 'Code')
                logger.error(f"查询任务状态失败 - 错误码: {code}, 消息: {error_msg}")
                return {
                    "success": False,
                    "error": f"{code}: {error_msg}"
                }
            
            # Extract the job payload.
            data = self._get_field(resp.body, 'Data')
            if not data:
                return {
                    "success": False,
                    "error": "响应缺少Data字段"
                }
            
            job_status = self._get_field(data, 'Status')  # PENDING, RUNNING, COMPLETED, FAILED
            documents = self._get_field(data, 'Documents', [])
            request_id = self._get_field(resp.body, 'RequestId', '')
            
            logger.info(f"任务状态: {job_status}")
            
            return {
                "success": True,
                "status": job_status,
                "documents": documents,
                "request_id": request_id
            }
            
        except Exception as e:
            logger.error(f"查询任务状态异常: {e}", exc_info=True)
            return {
                "success": False,
                "error": str(e)
            }
    
    def delete_file(
        self, 
        knowledge_base_id: str, 
        file_id: str
    ) -> dict:
        """
        Remove a file from the knowledge-base index (DeleteIndexDocument API).

        :param knowledge_base_id: knowledge-base ID
        :param file_id: file ID
        :return: dict with "success"; on success also "deleted_documents"/"request_id"
        """
        try:
            # DeleteIndexDocument detaches the document from the index.
            delete_doc_req = bailian_models.DeleteIndexDocumentRequest(
                index_id=knowledge_base_id,
                document_ids=[file_id]
            )
            
            resp = self._request_with_retry(lambda: self.client.delete_index_document(
                self.workspace_id,
                delete_doc_req
            ))
            
            logger.info(f"DeleteIndexDocument API 响应 - HTTP状态码: {resp.status_code}, Body: {resp.body}")
            
            # Only the Success flag decides the outcome.
            if self._is_success(resp.body):
                data = self._get_field(resp.body, 'Data')
                deleted_docs = self._get_field(data, 'DeletedDocument', []) if data else []
                request_id = self._get_field(resp.body, 'RequestId', '')
                
                return {
                    "success": True,
                    "deleted_documents": deleted_docs,
                    "request_id": request_id
                }
            else:
                error_msg = self._get_field(resp.body, 'Message', '未知错误')
                code = self._get_field(resp.body, 'Code')
                logger.error(f"删除文件失败 - 错误码: {code}, 消息: {error_msg}")
                return {
                    "success": False,
                    "error": f"{code}: {error_msg}"
                }
        except Exception as e:
            logger.error(f"删除文件异常: {e}", exc_info=True)
            return {
                "success": False,
                "error": str(e)
            }
    
    def submit_index_job(self, knowledge_base_id: str) -> dict:
        """
        Submit the index job that finalizes knowledge-base creation.

        Per the API docs: CreateIndex only initializes the creation job;
        SubmitIndexJob must be called afterwards, otherwise the knowledge
        base stays empty.

        :param knowledge_base_id: knowledge-base ID
        :return: dict with "success"; on success also "job_id"/"request_id"
        """
        try:
            submit_req = bailian_models.SubmitIndexJobRequest()
            submit_req.index_id = knowledge_base_id
            
            # Positional call: only workspace_id and the request object.
            resp = self._request_with_retry(lambda: self.client.submit_index_job(
                self.workspace_id,
                submit_req
            ))
            
            logger.info(f"SubmitIndexJob API 响应 - HTTP状态码: {resp.status_code}, Body: {resp.body}")
            
            # Only the Success flag decides the outcome.
            if self._is_success(resp.body):
                data = self._get_field(resp.body, 'Data')
                job_id = self._get_field(data, 'Id') if data else None
                request_id = self._get_field(resp.body, 'RequestId', '')
                
                return {
                    "success": True,
                    "job_id": job_id,
                    "request_id": request_id
                }
            else:
                error_msg = self._get_field(resp.body, 'Message', '未知错误')
                code = self._get_field(resp.body, 'Code')
                logger.error(f"提交索引作业失败 - 错误码: {code}, 消息: {error_msg}")
                return {
                    "success": False,
                    "error": f"{code}: {error_msg}"
                }
        except Exception as e:
            logger.error(f"提交索引作业异常: {e}", exc_info=True)
            return {
                "success": False,
                "error": str(e)
            }
    
    def delete_knowledge_base(self, knowledge_base_id: str) -> dict:
        """
        Delete a knowledge base (index).

        :param knowledge_base_id: knowledge-base ID
        :return: dict with "success"; on success also "request_id"
        """
        try:
            # Build the deletion request.
            delete_req = bailian_models.DeleteIndexRequest()
            delete_req.index_id = knowledge_base_id
            
            # Positional call: only workspace_id and the request object.
            resp = self._request_with_retry(lambda: self.client.delete_index(
                self.workspace_id,
                delete_req
            ))
            
            logger.info(f"DeleteIndex API 响应 - HTTP状态码: {resp.status_code}, Body: {resp.body}")
            
            # Only the Success flag decides the outcome.
            if self._is_success(resp.body):
                request_id = self._get_field(resp.body, 'RequestId', '')
                return {
                    "success": True,
                    "request_id": request_id
                }
            else:
                error_msg = self._get_field(resp.body, 'Message', '未知错误')
                code = self._get_field(resp.body, 'Code')
                logger.error(f"删除知识库失败 - 错误码: {code}, 消息: {error_msg}")
                return {
                    "success": False,
                    "error": f"{code}: {error_msg}"
                }
        except Exception as e:
            logger.error(f"删除知识库异常: {e}", exc_info=True)
            return {
                "success": False,
                "error": str(e)
            }


# Global client instance; environment variables override the config-file credentials.
kb_client = BailianKnowledgeBaseClient(
    access_key_id=os.getenv("ALIBABA_CLOUD_ACCESS_KEY_ID", ACCESS_KEY_ID),
    access_key_secret=os.getenv("ALIBABA_CLOUD_ACCESS_KEY_SECRET", ACCESS_KEY_SECRET),
    workspace_id=os.getenv("WORKSPACE_ID", WORKSPACE_ID)
)


@router.post("/bailian/v1/pet/create_knowledge_base")
async def create_pet_knowledge_base(
    uid: str = Body(..., embed=True, description="用户ID"),
    pet_name: str = Body(..., embed=True, description="宠物名称"),
    request: Request = None,
    db: Session = Depends(get_db)
):
    """
    Create a dedicated knowledge base for a user's pet.

    A user may own at most one pet knowledge base; when one already exists,
    its info is returned instead of creating another.
    """
    # Verify the caller's token before doing any work.
    # (HTTPStatus members are ints, so response codes stay 401/200/500 —
    # the literals used previously, just spelled consistently.)
    try:
        decode_verify_token(request, GO_SERVICE_SECRET_KEY, ALGORITHM)
    except Exception as e:
        logger.error(f"Token验证失败 用户ID: {uid} | 错误信息: {e}")
        return BaseResponse(
            code=HTTPStatus.UNAUTHORIZED,
            msg="Token验证失败",
            data={"error": str(e)}
        )
    
    # Return the existing knowledge base if the user already has one.
    existing_kb = db.query(PetKnowledgeBase).filter(
        PetKnowledgeBase.user_id == uid
    ).first()
    
    if existing_kb:
        logger.info(f"用户 {uid} 已有知识库，返回现有知识库信息")
        return BaseResponse(
            code=HTTPStatus.OK,
            msg="用户已有知识库",
            data={
                "knowledge_base_id": existing_kb.knowledge_base_id,
                "pet_name": existing_kb.pet_name,
                "document_count": existing_kb.document_count,
                "created_time": existing_kb.created_time.strftime("%Y-%m-%d %H:%M:%S"),
                "is_new": False
            }
        )
    
    # Build a remote-safe KB name (remote limit: 1-20 chars):
    # "Pet" + last 6 chars of uid + last 6 digits of timestamp <= 15 chars.
    timestamp = int(time.time())
    uid_suffix = uid[-6:] if len(uid) >= 6 else uid
    time_suffix = str(timestamp)[-6:]
    kb_name = f"Pet{uid_suffix}{time_suffix}"
    description = f"用户{uid}的宠物{pet_name}专属知识库"
    
    result = kb_client.create_knowledge_base(
        name=kb_name,
        description=description
    )
    
    if not result["success"]:
        logger.error(f"创建知识库失败 用户ID: {uid} | 错误: {result['error']}")
        return BaseResponse(
            code=HTTPStatus.INTERNAL_SERVER_ERROR,
            msg="创建知识库失败",
            data={"error": result["error"]}
        )
    
    # Persist the new knowledge-base record locally.
    new_kb = PetKnowledgeBase(
        user_id=uid,
        pet_name=pet_name,
        workspace_id=kb_client.workspace_id,
        knowledge_base_id=result["knowledge_base_id"],
        knowledge_base_name=kb_name,
        description=description,
        document_count=0
    )
    
    try:
        db.add(new_kb)
        db.commit()
        db.refresh(new_kb)
    except Exception as e:
        db.rollback()
        logger.error(f"保存知识库记录失败 用户ID: {uid} | 错误: {e}")
        # Best-effort cleanup: delete the remote KB just created so it
        # doesn't leak when the local record can't be saved.
        kb_client.delete_knowledge_base(result["knowledge_base_id"])
        return BaseResponse(
            code=HTTPStatus.INTERNAL_SERVER_ERROR,
            msg="保存知识库记录失败",
            data={"error": str(e)}
        )
    
    logger.info(f"成功创建宠物知识库 用户ID: {uid} | 知识库ID: {result['knowledge_base_id']}")
    return BaseResponse(
        code=HTTPStatus.OK,
        msg="成功创建宠物知识库",
        data={
            "knowledge_base_id": result["knowledge_base_id"],
            "pet_name": pet_name,
            "document_count": 0,
            "created_time": new_kb.created_time.strftime("%Y-%m-%d %H:%M:%S"),
            "is_new": True
        }
    )


@router.post("/bailian/v1/pet/add_knowledge_file")
async def add_pet_knowledge_file(
    uid: str = Body(..., embed=True, description="用户ID"),
    file_name: str = Body(..., embed=True, description="文件名"),
    file_url: str = Body(..., embed=True, description="文件URL（公开可访问）"),
    pet_name: Optional[str] = Body(None, embed=True, description="宠物名称（可选，如果没有知识库会自动创建）"),
    request: Request = None,
    db: Session = Depends(get_db)
):
    """
    Add a file (referenced by a public URL) to the user's pet knowledge base.

    Rules:
    1. If the user has no knowledge base yet, one is created automatically
       (``pet_name`` is used when provided, otherwise a default name).
    2. The file URL must be publicly accessible.
    3. Supported formats: Word (.docx, .doc) and TXT.
    4. Documented size limit is 10MB. NOTE(review): MAX_FILE_SIZE is never
       checked in this handler — only a URL is received; confirm the limit
       is enforced upstream or by the Bailian service.
    5. Each user may hold at most MAX_DOCUMENTS_PER_USER documents.

    :param uid: caller's user id
    :param file_name: file name; its extension decides format acceptance
    :param file_url: publicly reachable URL of the document
    :param pet_name: optional pet name used when auto-creating a KB
    :param request: incoming request, needed for token verification
    :param db: SQLAlchemy session (injected)
    :return: BaseResponse describing the newly registered file
    """
    # Verify the caller's token before touching any resources.
    try:
        decode_verify_token(request, GO_SERVICE_SECRET_KEY, ALGORITHM)
    except Exception as e:
        logger.error(f"Token验证失败 用户ID: {uid} | 错误信息: {e}")
        return BaseResponse(
            code=HTTPStatus.UNAUTHORIZED,
            msg="Token验证失败",
            data={"error": str(e)}
        )
    
    # Reject unsupported formats early; str.endswith accepts a tuple of suffixes.
    if not file_name.lower().endswith(('.docx', '.doc', '.txt')):
        return BaseResponse(
            code=HTTPStatus.BAD_REQUEST,
            msg="不支持的文件格式，仅支持Word(.docx, .doc)和TXT格式",
            data={}
        )
    
    # Look up the user's (single) knowledge base, if any.
    pet_kb = db.query(PetKnowledgeBase).filter(
        PetKnowledgeBase.user_id == uid
    ).first()
    
    # Enforce the per-user document quota before any remote call.
    existing_file_count = db.query(PetKnowledgeFile).filter(
        PetKnowledgeFile.user_id == uid
    ).count()
    
    if existing_file_count >= MAX_DOCUMENTS_PER_USER:
        logger.warning(f"用户 {uid} 已达到文档数量上限 - 当前文档数: {existing_file_count}, 最大限制: {MAX_DOCUMENTS_PER_USER}")
        return BaseResponse(
            code=HTTPStatus.BAD_REQUEST,
            msg=f"已达到文档数量上限，每个用户最多上传 {MAX_DOCUMENTS_PER_USER} 个文档，当前已有 {existing_file_count} 个文档",
            data={
                "current_count": existing_file_count,
                "max_count": MAX_DOCUMENTS_PER_USER
            }
        )
    
    # Tracks whether this request had to auto-create a knowledge base.
    kb_auto_created = False
    
    # No knowledge base yet: create one on the fly.
    if not pet_kb:
        if not pet_name:
            pet_name = "宠物"  # default pet name
        
        logger.info(f"用户 {uid} 没有知识库，自动创建知识库，宠物名称: {pet_name}")
        
        # KB name must stay within 1-20 characters:
        # "Pet" + last 6 chars of uid + last 6 digits of timestamp <= 15 chars.
        timestamp = int(time.time())
        uid_suffix = uid[-6:] if len(uid) >= 6 else uid
        time_suffix = str(timestamp)[-6:]
        kb_name = f"Pet{uid_suffix}{time_suffix}"
        description = f"用户{uid}的宠物{pet_name}专属知识库"
        
        result = kb_client.create_knowledge_base(
            name=kb_name,
            description=description
        )
        
        if not result["success"]:
            logger.error(f"自动创建知识库失败 用户ID: {uid} | 错误: {result['error']}")
            return BaseResponse(
                code=HTTPStatus.INTERNAL_SERVER_ERROR,
                msg="自动创建知识库失败",
                data={"error": result["error"]}
            )
        
        # Persist the new knowledge base record.
        pet_kb = PetKnowledgeBase(
            user_id=uid,
            pet_name=pet_name,
            workspace_id=kb_client.workspace_id,
            knowledge_base_id=result["knowledge_base_id"],
            knowledge_base_name=kb_name,
            description=description,
            document_count=0
        )
        
        try:
            db.add(pet_kb)
            db.commit()
            db.refresh(pet_kb)
            kb_auto_created = True
            logger.info(f"成功自动创建宠物知识库 用户ID: {uid} | 知识库ID: {result['knowledge_base_id']}")
        except Exception as e:
            db.rollback()
            logger.error(f"保存知识库记录失败 用户ID: {uid} | 错误: {e}")
            # Compensate: remove the remote KB we just created (best effort).
            kb_client.delete_knowledge_base(result["knowledge_base_id"])
            return BaseResponse(
                code=HTTPStatus.INTERNAL_SERVER_ERROR,
                msg="保存知识库记录失败",
                data={"error": str(e)}
            )
    
    # Hand the URL to Bailian for ingestion.
    result = kb_client.add_file_by_url(
        knowledge_base_id=pet_kb.knowledge_base_id,
        file_name=file_name,
        file_url=file_url
    )
    
    if not result["success"]:
        logger.error(f"添加文件失败 用户ID: {uid} | 错误: {result['error']}")
        return BaseResponse(
            code=HTTPStatus.INTERNAL_SERVER_ERROR,
            msg="添加文件失败",
            data={"error": result["error"]}
        )
    
    # Persist the file record.
    new_file = PetKnowledgeFile(
        user_id=uid,
        knowledge_base_id=pet_kb.knowledge_base_id,
        file_id=result["file_id"],
        file_name=file_name,
        file_url=file_url,
        file_status="PARSING",  # document starts in the parsing state
        job_id=result.get("job_id"),  # ingestion job id (may be absent)
        job_status="PENDING",  # initial ingestion job state
        workspace_id=kb_client.workspace_id,
        request_id=result["request_id"]
    )
    
    try:
        db.add(new_file)
        # Keep the cached document counter in sync with the insert.
        pet_kb.document_count += 1
        db.commit()
        db.refresh(new_file)
    except Exception as e:
        db.rollback()
        logger.error(f"保存文件记录失败 用户ID: {uid} | 错误: {e}")
        # Compensate: remove the remote file we just added (best effort).
        kb_client.delete_file(pet_kb.knowledge_base_id, result["file_id"])
        return BaseResponse(
            code=HTTPStatus.INTERNAL_SERVER_ERROR,
            msg="保存文件记录失败",
            data={"error": str(e)}
        )
    
    logger.info(f"成功添加文件到知识库 用户ID: {uid} | 文件ID: {result['file_id']} | 文件名: {file_name}")
    
    # Message differs depending on whether a KB was auto-created.
    if kb_auto_created:
        msg = "已自动创建知识库并添加文件，正在追加到知识库中..."
    else:
        msg = "成功添加文件，正在追加到知识库中..."
    
    return BaseResponse(
        code=HTTPStatus.OK,
        msg=msg,
        data={
            "file_id": result["file_id"],
            "file_name": file_name,
            "file_status": "PARSING",
            "job_id": result.get("job_id"),
            "job_status": "PENDING",
            "knowledge_base_id": pet_kb.knowledge_base_id,
            "kb_auto_created": kb_auto_created,  # whether a KB was auto-created
            "pet_name": pet_kb.pet_name
        }
    )


@router.post("/bailian/v1/pet/add_file_to_kb")
async def add_file_to_specific_kb(
    uid: str = Body(..., embed=True, description="用户ID"),
    knowledge_base_id: str = Body(..., embed=True, description="知识库ID"),
    file_name: str = Body(..., embed=True, description="文件名"),
    file_url: str = Body(..., embed=True, description="文件URL（公开可访问）"),
    request: Request = None,
    db: Session = Depends(get_db)
):
    """
    Add a file directly to a specific knowledge base.

    Rules:
    1. The caller must name the target knowledge base id.
    2. Files may only be added to the caller's own knowledge base.
    3. The file URL must be publicly accessible.
    4. Supported formats: Word (.docx, .doc) and TXT.
    5. Documented size limit is 10MB. NOTE(review): MAX_FILE_SIZE is never
       checked in this handler — confirm the limit is enforced elsewhere.
    6. Each user may hold at most MAX_DOCUMENTS_PER_USER documents.

    :param uid: caller's user id
    :param knowledge_base_id: target knowledge base (must belong to uid)
    :param file_name: file name; its extension decides format acceptance
    :param file_url: publicly reachable URL of the document
    :param request: incoming request, needed for token verification
    :param db: SQLAlchemy session (injected)
    :return: BaseResponse describing the newly registered file
    """
    # Verify the caller's token before touching any resources.
    try:
        decode_verify_token(request, GO_SERVICE_SECRET_KEY, ALGORITHM)
    except Exception as e:
        logger.error(f"Token验证失败 用户ID: {uid} | 错误信息: {e}")
        return BaseResponse(
            code=HTTPStatus.UNAUTHORIZED,
            msg="Token验证失败",
            data={"error": str(e)}
        )
    
    # Reject unsupported formats early; str.endswith accepts a tuple of suffixes.
    if not file_name.lower().endswith(('.docx', '.doc', '.txt')):
        return BaseResponse(
            code=HTTPStatus.BAD_REQUEST,
            msg="不支持的文件格式，仅支持Word(.docx, .doc)和TXT格式",
            data={}
        )
    
    # The target KB must exist and belong to the caller (ownership check).
    pet_kb = db.query(PetKnowledgeBase).filter(
        PetKnowledgeBase.knowledge_base_id == knowledge_base_id,
        PetKnowledgeBase.user_id == uid
    ).first()
    
    if not pet_kb:
        logger.warning(f"知识库不存在或不属于该用户 用户ID: {uid} | 知识库ID: {knowledge_base_id}")
        return BaseResponse(
            code=HTTPStatus.NOT_FOUND,
            msg="知识库不存在或不属于该用户",
            data={}
        )
    
    # Enforce the per-user document quota before the remote call.
    existing_file_count = db.query(PetKnowledgeFile).filter(
        PetKnowledgeFile.user_id == uid
    ).count()
    
    if existing_file_count >= MAX_DOCUMENTS_PER_USER:
        logger.warning(f"用户 {uid} 已达到文档数量上限 - 当前文档数: {existing_file_count}, 最大限制: {MAX_DOCUMENTS_PER_USER}")
        return BaseResponse(
            code=HTTPStatus.BAD_REQUEST,
            msg=f"已达到文档数量上限，每个用户最多上传 {MAX_DOCUMENTS_PER_USER} 个文档，当前已有 {existing_file_count} 个文档",
            data={
                "current_count": existing_file_count,
                "max_count": MAX_DOCUMENTS_PER_USER
            }
        )
    
    # Hand the URL to Bailian for ingestion.
    result = kb_client.add_file_by_url(
        knowledge_base_id=knowledge_base_id,
        file_name=file_name,
        file_url=file_url
    )
    
    if not result["success"]:
        logger.error(f"添加文件失败 用户ID: {uid} | 知识库ID: {knowledge_base_id} | 错误: {result['error']}")
        return BaseResponse(
            code=HTTPStatus.INTERNAL_SERVER_ERROR,
            msg="添加文件失败",
            data={"error": result["error"]}
        )
    
    # Persist the file record.
    new_file = PetKnowledgeFile(
        user_id=uid,
        knowledge_base_id=knowledge_base_id,
        file_id=result["file_id"],
        file_name=file_name,
        file_url=file_url,
        file_status="PARSING",  # document starts in the parsing state
        job_id=result.get("job_id"),  # ingestion job id (may be absent)
        job_status="PENDING",  # initial ingestion job state
        workspace_id=kb_client.workspace_id,
        request_id=result["request_id"]
    )
    
    try:
        db.add(new_file)
        # Keep the cached document counter in sync with the insert.
        pet_kb.document_count += 1
        db.commit()
        db.refresh(new_file)
    except Exception as e:
        db.rollback()
        logger.error(f"保存文件记录失败 用户ID: {uid} | 知识库ID: {knowledge_base_id} | 错误: {e}")
        # Compensate: remove the remote file we just added (best effort).
        kb_client.delete_file(knowledge_base_id, result["file_id"])
        return BaseResponse(
            code=HTTPStatus.INTERNAL_SERVER_ERROR,
            msg="保存文件记录失败",
            data={"error": str(e)}
        )
    
    logger.info(f"成功添加文件到知识库 用户ID: {uid} | 知识库ID: {knowledge_base_id} | 文件ID: {result['file_id']} | 文件名: {file_name}")
    
    return BaseResponse(
        code=HTTPStatus.OK,
        msg="成功添加文件，正在追加到知识库中...",
        data={
            "file_id": result["file_id"],
            "file_name": file_name,
            "file_status": "PARSING",
            "job_id": result.get("job_id"),
            "job_status": "PENDING",
            "knowledge_base_id": knowledge_base_id,
            "pet_name": pet_kb.pet_name
        }
    )


@router.post("/bailian/v1/pet/query_file_status")
async def query_file_status(
    uid: str = Body(..., embed=True, description="用户ID"),
    file_id: str = Body(..., embed=True, description="文件ID"),
    request: Request = None,
    db: Session = Depends(get_db)
):
    """
    Query a file's parse status (and refresh its ingestion-job status).

    File status values:
    - PARSING: still being parsed
    - PARSE_SUCCESS: parsed successfully
    - PARSE_FAILED: parsing failed

    :param uid: caller's user id
    :param file_id: id of the file to inspect
    :param request: incoming request, needed for token verification
    :param db: SQLAlchemy session (injected)
    :return: BaseResponse with the current file and job statuses
    """
    # Verify the caller's token before touching any resources.
    try:
        decode_verify_token(request, GO_SERVICE_SECRET_KEY, ALGORITHM)
    except Exception as e:
        logger.error(f"Token验证失败 用户ID: {uid} | 错误信息: {e}")
        return BaseResponse(
            code=HTTPStatus.UNAUTHORIZED,
            msg="Token验证失败",
            data={"error": str(e)}
        )
    
    # Locate the caller's file record (scoped by user id for ownership).
    file_record = db.query(PetKnowledgeFile).filter(
        PetKnowledgeFile.user_id == uid,
        PetKnowledgeFile.file_id == file_id
    ).first()
    
    if not file_record:
        return BaseResponse(
            code=HTTPStatus.NOT_FOUND,
            msg="未找到文件记录",
            data={}
        )
    
    # Ask Bailian for the current parse status.
    result = kb_client.describe_file(
        file_id=file_id,
        index_id=file_record.knowledge_base_id
    )
    
    if not result["success"]:
        logger.error(f"查询文件状态失败 用户ID: {uid} | 文件ID: {file_id} | 错误: {result['error']}")
        return BaseResponse(
            code=HTTPStatus.INTERNAL_SERVER_ERROR,
            msg="查询文件状态失败",
            data={"error": result["error"]}
        )
    
    # Refresh the ingestion-job status unless it already reached a final state.
    job_status = file_record.job_status
    if file_record.job_id and file_record.job_status not in ["COMPLETED", "FAILED"]:
        job_result = kb_client.get_index_job_status(
            index_id=file_record.knowledge_base_id,
            job_id=file_record.job_id
        )
        if job_result["success"]:
            job_status = job_result["status"]
            logger.info(f"追加任务状态: {job_status}")
    
    # Persist the refreshed statuses; a failure here is logged but non-fatal
    # since the caller still receives the freshly queried values.
    try:
        file_record.file_status = result["status"]
        if job_status:
            file_record.job_status = job_status
        db.commit()
    except Exception as e:
        db.rollback()
        logger.error(f"更新文件状态失败 用户ID: {uid} | 错误: {e}")
    
    logger.info(f"查询文件状态 用户ID: {uid} | 文件ID: {file_id} | 文件状态: {result['status']} | 任务状态: {job_status}")
    return BaseResponse(
        code=HTTPStatus.OK,
        msg="查询成功",
        data={
            "file_id": file_id,
            "file_name": result["file_name"],
            "file_status": result["status"],
            "job_id": file_record.job_id,
            "job_status": job_status,
            "file_url": file_record.file_url
        }
    )


@router.post("/bailian/v1/pet/delete_knowledge_file")
async def delete_pet_knowledge_file(
    uid: str = Body(..., embed=True, description="用户ID"),
    file_id: str = Body(..., embed=True, description="文件ID"),
    request: Request = None,
    db: Session = Depends(get_db)
):
    """
    Delete a file from the caller's knowledge base.

    The remote (Bailian) file is removed first, then the local record,
    keeping the knowledge base's cached document counter in sync.

    :param uid: caller's user id
    :param file_id: id of the file to delete
    :param request: incoming request, needed for token verification
    :param db: SQLAlchemy session (injected)
    :return: BaseResponse confirming the deletion
    """
    # Verify the caller's token before touching any resources.
    try:
        decode_verify_token(request, GO_SERVICE_SECRET_KEY, ALGORITHM)
    except Exception as e:
        logger.error(f"Token验证失败 用户ID: {uid} | 错误信息: {e}")
        return BaseResponse(
            code=HTTPStatus.UNAUTHORIZED,
            msg="Token验证失败",
            data={"error": str(e)}
        )
    
    # Locate the caller's file record (scoped by user id for ownership).
    file_record = db.query(PetKnowledgeFile).filter(
        PetKnowledgeFile.user_id == uid,
        PetKnowledgeFile.file_id == file_id
    ).first()
    
    if not file_record:
        return BaseResponse(
            code=HTTPStatus.NOT_FOUND,
            msg="未找到文件记录",
            data={}
        )
    
    # Delete the remote file first so we never orphan the local record.
    result = kb_client.delete_file(
        knowledge_base_id=file_record.knowledge_base_id,
        file_id=file_id
    )
    
    if not result["success"]:
        logger.error(f"删除文件失败 用户ID: {uid} | 文件ID: {file_id} | 错误: {result['error']}")
        return BaseResponse(
            code=HTTPStatus.INTERNAL_SERVER_ERROR,
            msg="删除文件失败",
            data={"error": result["error"]}
        )
    
    # Remove the local record and decrement the KB's document counter.
    try:
        kb_id = file_record.knowledge_base_id
        db.delete(file_record)
        
        pet_kb = db.query(PetKnowledgeBase).filter(
            PetKnowledgeBase.knowledge_base_id == kb_id
        ).first()
        # Guard against the counter going negative if it ever drifted.
        if pet_kb and pet_kb.document_count > 0:
            pet_kb.document_count -= 1
        
        db.commit()
    except Exception as e:
        db.rollback()
        logger.error(f"删除文件记录失败 用户ID: {uid} | 错误: {e}")
        return BaseResponse(
            code=HTTPStatus.INTERNAL_SERVER_ERROR,
            msg="删除文件记录失败",
            data={"error": str(e)}
        )
    
    logger.info(f"成功删除文件 用户ID: {uid} | 文件ID: {file_id}")
    return BaseResponse(
        code=HTTPStatus.OK,
        msg="成功删除文件",
        data={
            "file_id": file_id
        }
    )


@router.post("/bailian/v1/pet/get_knowledge_base_info")
async def get_pet_knowledge_base_info(
    uid: str = Body(..., embed=True, description="用户ID"),
    request: Request = None,
    db: Session = Depends(get_db)
):
    """
    Return the caller's pet knowledge base metadata plus all of its files.

    :param uid: caller's user id
    :param request: incoming request, needed for token verification
    :param db: SQLAlchemy session (injected)
    :return: BaseResponse with KB metadata and the list of its files
    """
    # Verify the caller's token before touching any resources.
    try:
        decode_verify_token(request, GO_SERVICE_SECRET_KEY, ALGORITHM)
    except Exception as e:
        logger.error(f"Token验证失败 用户ID: {uid} | 错误信息: {e}")
        return BaseResponse(
            code=HTTPStatus.UNAUTHORIZED,
            msg="Token验证失败",
            data={"error": str(e)}
        )
    
    # Each user has at most one knowledge base in this schema.
    pet_kb = db.query(PetKnowledgeBase).filter(
        PetKnowledgeBase.user_id == uid
    ).first()
    
    if not pet_kb:
        return BaseResponse(
            code=HTTPStatus.NOT_FOUND,
            msg="未找到宠物知识库",
            data={}
        )
    
    # Collect every file registered under this knowledge base.
    files = db.query(PetKnowledgeFile).filter(
        PetKnowledgeFile.knowledge_base_id == pet_kb.knowledge_base_id
    ).all()
    
    file_list = [
        {
            "file_id": f.file_id,
            "file_name": f.file_name,
            "file_status": f.file_status,
            "job_id": f.job_id,
            "job_status": f.job_status,
            "file_url": f.file_url,
            "created_time": f.created_time.strftime("%Y-%m-%d %H:%M:%S")
        }
        for f in files
    ]
    
    return BaseResponse(
        code=HTTPStatus.OK,
        msg="查询成功",
        data={
            "knowledge_base_id": pet_kb.knowledge_base_id,
            "pet_name": pet_kb.pet_name,
            "document_count": pet_kb.document_count,
            "created_time": pet_kb.created_time.strftime("%Y-%m-%d %H:%M:%S"),
            "files": file_list
        }
    )

