import base64
import io
import os
import time
import uuid
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

import requests
from PIL import Image

from apps.tool_call.api_client import APIClient  # reuse the tool-call API client
from middleware.milvus_client import MilvusClient, VectorDatabaseRouter
from middleware.redis_adapter import get_redis_adapter
from tools.text_processing import text_cleaning, get_embedding
from utils.exceptions import ToolError
from utils.logger import Logger

# Vector-retrieval configuration: maps collections to a backing store and
# carries the default similarity-search parameters.
VECTOR_RETRIEVAL_CONFIG = {
    # Backend used when a collection has no explicit mapping below.
    "default_db": "milvus",
    # Per-collection backend routing.
    "collection_mapping": {"multi_modal_kb": "milvus"},
    # Default search settings applied to similarity queries.
    "search_params": {"metric_type": "COSINE", "top_k": 5, "timeout": 10.0},
}

# Module-level logger shared by all multi-modal tooling in this file.
logger = Logger.get_logger(agent_name="multi_modal_tool")


class MultiModalTools:
    """Multi-modal tool set: image processing, cross-modal retrieval, answer generation.

    Responsibilities:
      * image <-> Base64 conversion and format validation
      * image description through a GPT-4V-style vision API
      * fused text+image embeddings (image branch is a placeholder; swap in CLIP)
      * cross-modal similarity search against a router-backed vector collection
      * single and batch ingestion of images into the vector store
    """

    def __init__(self):
        """Set up the vector router, Redis cache, API client and image config.

        Raises:
            ToolError: when any dependency fails to initialize.
        """
        try:
            # Router that dispatches collection operations to the configured backend.
            self.vector_router = VectorDatabaseRouter(
                default_db="milvus",
                db_configs={
                    "milvus": {}
                }
            )
            self.redis_client = get_redis_adapter()  # Redis adapter (cluster-mode capable)
            self.api_client = APIClient()  # reuse the tool-call API client
            self.mm_collection = "multi_modal_kb"  # multi-modal vector collection name
            self.supported_image_formats = {"png", "jpg", "jpeg", "bmp"}
            self.image_resize_size = (512, 512)  # uniform resize target for images
            self.mm_embedding_dim = 1024  # multi-modal embedding dimension (CLIP-style)

            # Ensure the multi-modal collection exists before first use.
            self._init_milvus_collection()
            logger.info("MultiModalTools initialized successfully")
        except Exception as e:
            logger.error(f"MultiModalTools初始化失败: {str(e)}", exc_info=True)
            raise ToolError(
                message="多模态工具初始化失败",
                context={"error": str(e)}
            ) from e

    def _init_milvus_collection(self):
        """Create the multi-modal collection if it does not exist.

        Schema fields: id (INT64 primary key), image_id, text_desc,
        mm_embedding (FLOAT_VECTOR of ``self.mm_embedding_dim``).

        Raises:
            ToolError: when collection creation fails.
        """
        try:
            # Resolve which backend serves this collection.
            db_type = VECTOR_RETRIEVAL_CONFIG["collection_mapping"].get(self.mm_collection, "milvus")

            # Collection schema; the vector dim must match what
            # generate_multi_modal_embedding() produces.
            collection_schema = [
                {"name": "id", "type": "INT64", "is_primary": True},
                {"name": "image_id", "type": "VARCHAR", "params": {"max_length": 128}},
                {"name": "text_desc", "type": "VARCHAR", "params": {"max_length": 2048}},
                {"name": "mm_embedding", "type": "FLOAT_VECTOR", "params": {"dim": self.mm_embedding_dim}}
            ]

            # Create only when missing so re-initialization is idempotent.
            if not self.vector_router.collection_exists(self.mm_collection, db_type):
                self.vector_router.create_collection(
                    collection_name=self.mm_collection,
                    schema=collection_schema,
                    index_params={"index_type": "IVF_FLAT", "nlist": 1024},
                    db_type=db_type
                )
                logger.info(f"初始化集合: {self.mm_collection}")
        except Exception as e:
            logger.error(f"初始化多模态集合失败: {str(e)}")
            raise ToolError(
                message="初始化多模态集合失败",
                context={"error": str(e)}
            ) from e

    def _validate_image_format(self, image_path: Optional[str] = None, base64_str: Optional[str] = None) -> bool:
        """Validate the image input (file-path suffix check, or Base64 presence).

        Args:
            image_path: path whose suffix must be one of the supported formats.
            base64_str: Base64 payload; validated later when actually decoded.

        Returns:
            True when the input looks usable, False otherwise.
        """
        if image_path:
            file_ext = Path(image_path).suffix.lower().lstrip(".")
            if file_ext not in self.supported_image_formats:
                logger.warning(f"不支持的图片格式: {file_ext}，支持格式：{self.supported_image_formats}")
                return False
        elif base64_str:
            # Base64 input has no suffix to check; decoding validates it downstream.
            pass
        else:
            logger.warning("未提供图片路径或Base64字符串")
            return False
        return True

    def image_to_base64(self, image_path: str) -> str:
        """Read an image file and return its Base64-encoded contents (for API calls).

        Raises:
            ToolError: when the file is missing, the format is unsupported,
                or reading/encoding fails.
        """
        if not os.path.exists(image_path):
            logger.error(f"图片不存在: {image_path}")
            raise ToolError(
                message="图片不存在",
                context={"image_path": image_path}
            )

        if not self._validate_image_format(image_path=image_path):
            raise ToolError(
                message="不支持的图片格式",
                context={"image_path": image_path, "supported_formats": list(self.supported_image_formats)}
            )

        try:
            with open(image_path, "rb") as f:
                base64_bytes = base64.b64encode(f.read())
                return base64_bytes.decode("utf-8")
        except Exception as e:
            logger.error(f"图片转Base64失败: image_path={image_path}, error={str(e)}", exc_info=True)
            raise ToolError(
                message="图片转Base64失败",
                context={"image_path": image_path, "error": str(e)}
            ) from e

    def base64_to_image(self, base64_str: str) -> Image.Image:
        """Decode a Base64 string into an RGB PIL image, resized to the standard size.

        Raises:
            ToolError: when decoding or image parsing fails.
        """
        try:
            base64_bytes = base64.b64decode(base64_str)
            image = Image.open(io.BytesIO(base64_bytes)).convert("RGB")
            # Normalize all images to the configured size for downstream models.
            image = image.resize(self.image_resize_size, Image.Resampling.LANCZOS)
            logger.debug("Base64转图片成功")
            return image
        except Exception as e:
            logger.error(f"Base64转图片失败: error={str(e)}", exc_info=True)
            raise ToolError(
                message="Base64转图片失败",
                context={"error": str(e)}
            ) from e

    def generate_image_description(
            self,
            image_path: Optional[str] = None,
            base64_str: Optional[str] = None,
            prompt: str = "详细描述图片内容，包括物体、颜色、场景、文字信息"
    ) -> str:
        """Describe an image by calling a GPT-4V-style vision API.

        Exactly one of ``image_path`` / ``base64_str`` must be supplied.

        Returns:
            The cleaned description text.

        Raises:
            ToolError: when no image is given or the API call fails.
        """
        if not image_path and not base64_str:
            raise ToolError(message="必须提供图片路径或Base64编码")

        try:
            # Normalize the input to a Base64 payload.
            if image_path:
                base64_str = self.image_to_base64(image_path)

            # BUGFIX: the vision API expects an http(s) URL or a data URL, not a
            # bare Base64 string — wrap the payload as a data URL.
            api_params = {
                "model": "gpt-4-vision-preview",
                "messages": [
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": prompt},
                            {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_str}"}}
                        ]
                    }
                ],
                "max_tokens": 512
            }
            response = self.api_client.call_openai_api(api_params)
            description = response["choices"][0]["message"]["content"]
            cleaned_description = text_cleaning(description)

            logger.debug(f"图片描述生成完成: 描述长度={len(cleaned_description)}")
            return cleaned_description
        except ToolError:
            raise
        except Exception as e:
            logger.error(f"图片描述生成失败: error={str(e)}", exc_info=True)
            raise ToolError(
                message="图片描述生成失败",
                context={"image_path": image_path, "error": str(e)}
            ) from e

    def generate_multi_modal_embedding(
            self,
            text: Optional[str] = None,
            image_path: Optional[str] = None,
            base64_str: Optional[str] = None
    ) -> List[float]:
        """Generate a fused text+image embedding (placeholder; replace with CLIP).

        The returned vector always has exactly ``self.mm_embedding_dim`` entries
        so it matches the collection schema.

        Raises:
            ToolError: when embedding generation fails.
        """
        try:
            # Text embedding — get_embedding is assumed to return 768 dims
            # (TODO confirm against tools.text_processing); zero vector when no text.
            text_embedding = get_embedding(text or "") if text else [0.0] * 768

            # BUGFIX: pad (or truncate) the text embedding to mm_embedding_dim.
            # The previous zip-based fusion silently truncated the fused vector to
            # the shorter 768-dim text vector, which does not match the
            # FLOAT_VECTOR dim declared in the collection schema.
            if len(text_embedding) < self.mm_embedding_dim:
                text_embedding = text_embedding + [0.0] * (self.mm_embedding_dim - len(text_embedding))
            else:
                text_embedding = text_embedding[:self.mm_embedding_dim]

            # Image embedding — placeholder random vector; a real deployment
            # should extract this with a CLIP image encoder.
            image_embedding = [0.0] * self.mm_embedding_dim
            if image_path or base64_str:
                import numpy as np
                image_embedding = np.random.randn(self.mm_embedding_dim).tolist()

            # Fusion — simple element-wise average (an attention-based fusion
            # would go here in a production implementation).
            fused_embedding = [(t + i) / 2 for t, i in zip(text_embedding, image_embedding)]
            logger.debug("多模态嵌入生成完成")
            return fused_embedding
        except Exception as e:
            logger.error(f"多模态嵌入生成失败: text={text[:30] if text else None}, error={str(e)}", exc_info=True)
            raise ToolError(
                message="多模态嵌入生成失败",
                context={"text": text, "error": str(e)}
            ) from e

    def cross_modal_search(
            self,
            query_text: Optional[str] = None,
            query_image_path: Optional[str] = None,
            query_base64: Optional[str] = None,
            top_k: int = 3
    ) -> List[Tuple[str, str, float]]:
        """Cross-modal retrieval (text-to-image / image-to-text).

        Returns:
            List of (image_id, text_desc, similarity) tuples, sorted by
            similarity descending.

        Raises:
            ToolError: when no query input is given or the search fails.
        """
        if not query_text and not query_image_path and not query_base64:
            raise ToolError(message="必须提供查询文本或查询图片")

        try:
            # Embed the query with the same fusion used at ingestion time.
            query_embedding = self.generate_multi_modal_embedding(
                text=query_text,
                image_path=query_image_path,
                base64_str=query_base64
            )

            db_type = VECTOR_RETRIEVAL_CONFIG["collection_mapping"].get(self.mm_collection, "milvus")

            results = self.vector_router.search(
                collection_name=self.mm_collection,
                query_vectors=[query_embedding],
                top_k=top_k,
                metric_type="COSINE",
                db_type=db_type,
                vector_field="mm_embedding"  # vector field declared in the schema
            )

            # Resolve hits to stored entities (image_id + text_desc + similarity).
            retrieved_results = []
            if results and len(results) > 0:
                hit_ids = [hit.id for hit in results[0]]
                if hit_ids:
                    # One batched lookup instead of one round-trip per hit.
                    entities = self.vector_router.get_entities_by_id(
                        collection_name=self.mm_collection,
                        ids=hit_ids,
                        db_type=db_type
                    )

                    id_to_entity = {entity.get("id"): entity for entity in entities}

                    for hit in results[0]:
                        entity = id_to_entity.get(hit.id)
                        if entity and "image_id" in entity and "text_desc" in entity:
                            retrieved_results.append((
                                entity["image_id"],   # image identifier
                                entity["text_desc"],  # image description
                                round(hit.score, 4)   # similarity score
                            ))

            retrieved_results.sort(key=lambda x: x[2], reverse=True)
            logger.debug(f"跨模态检索完成: 有效结果数={len(retrieved_results)}")
            return retrieved_results
        except ToolError:
            raise
        except Exception as e:
            logger.error(f"跨模态检索失败: query_text={query_text[:30] if query_text else None}, error={str(e)}",
                         exc_info=True)
            raise ToolError(
                message="跨模态检索失败",
                context={"query_text": query_text, "error": str(e)}
            ) from e

    def generate_multi_modal_answer(
            self,
            query_text: str,
            query_image_path: Optional[str] = None,
            query_base64: Optional[str] = None,
            similarity_threshold: float = 0.6
    ) -> str:
        """Answer a question using text + retrieved image context (joint QA).

        Args:
            similarity_threshold: results below this score are discarded.

        Raises:
            ToolError: when retrieval or answer generation fails.
        """
        try:
            # 1. Retrieve cross-modal context for the query.
            retrieved_results = self.cross_modal_search(
                query_text=query_text,
                query_image_path=query_image_path,
                query_base64=query_base64
            )

            # 2. Drop low-similarity results and render the remaining ones.
            valid_results = [res for res in retrieved_results if res[2] >= similarity_threshold]
            context = "\n".join(
                [f"相关图片{idx + 1}描述：{res[1]}（相似度：{res[2]}）" for idx, res in enumerate(valid_results)])

            # 3. Build the final prompt depending on whether context survived.
            if valid_results:
                prompt = f"用户问题：{query_text}\n\n相关图片信息：{context}\n\n请结合图片信息和你的知识回答用户问题"
            else:
                prompt = f"用户问题：{query_text}\n\n无相关图片信息，请仅根据你的知识回答"

            # 4. Generate the answer with the chat model.
            api_params = {
                "model": "gpt-3.5-turbo",
                "messages": [{"role": "user", "content": prompt}],
                "max_tokens": 1024
            }
            response = self.api_client.call_openai_api(api_params)
            answer = response["choices"][0]["message"]["content"]

            logger.debug(f"多模态答案生成完成: query={query_text[:30]}")
            return answer
        except ToolError:
            raise
        except Exception as e:
            logger.error(f"多模态答案生成失败: query_text={query_text}, error={str(e)}", exc_info=True)
            raise ToolError(
                message="多模态答案生成失败",
                context={"query_text": query_text, "error": str(e)}
            ) from e

    def add_image_to_vector_db(self, image_path: str, image_desc: str, image_id: Optional[str] = None) -> Dict[str, Any]:
        """Add a single image (plus its description) to the multi-modal vector store.

        Args:
            image_path: path to the image file.
            image_desc: textual description stored alongside the embedding.
            image_id: optional external identifier; a UUID is generated when absent.

        Returns:
            Dict with ``image_id``, ``status`` and ``insert_ids``.

        Raises:
            ToolError: on validation, embedding or insertion failure.
        """
        try:
            # 1. Validate the image format up front.
            if not self._validate_image_format(image_path=image_path):
                raise ToolError(
                    message="图片格式不支持",
                    context={"image_path": image_path}
                )

            # 2. Use the caller-provided ID or mint a fresh UUID.
            if not image_id:
                image_id = str(uuid.uuid4())

            # 3. Fused embedding over description + image.
            mm_embedding = self.generate_multi_modal_embedding(image_path=image_path, text=image_desc)
            if not mm_embedding:
                raise ToolError(
                    message="生成多模态嵌入失败",
                    context={"image_id": image_id}
                )

            # 4. Resolve the backend for this collection.
            db_type = VECTOR_RETRIEVAL_CONFIG["collection_mapping"].get(self.mm_collection, "milvus")

            # 5. Insert into the vector store.
            # BUGFIX: use nanosecond precision for the INT64 primary key —
            # int(time.time()) collides for any two inserts in the same second.
            data = [
                {
                    "id": time.time_ns(),
                    "image_id": image_id,
                    "text_desc": image_desc,
                    "mm_embedding": mm_embedding
                }
            ]

            insert_result = self.vector_router.insert(
                collection_name=self.mm_collection,
                data=data,
                db_type=db_type
            )

            # 6. Cache the image path in Redis for quick lookups.
            self.redis_client.set(
                key=f"mm:image:{image_id}",
                value=image_path,
                ex=3600  # 1-hour TTL
            )

            logger.info(f"图片添加到多模态向量库成功: image_id={image_id}, db_type={db_type}")
            return {
                "image_id": image_id,
                "status": "success",
                "insert_ids": insert_result if isinstance(insert_result, list) else []
            }
        except ToolError:
            raise
        except Exception as e:
            logger.error(f"添加图片到向量库失败: image_path={image_path}, error={str(e)}", exc_info=True)
            raise ToolError(
                message="添加图片到向量库失败",
                context={"image_path": image_path, "error": str(e)}
            ) from e

    def batch_add_images_to_vector_db(self, images_data: List[Dict[str, str]]) -> Dict[str, Any]:
        """Batch-add images to the multi-modal vector store.

        Args:
            images_data: list of dicts with ``image_path``, ``image_desc`` and
                optionally ``image_id``.

        Returns:
            Summary dict: total / success_count / failed_count / failed_images /
            insert_ids.

        Raises:
            ToolError: on unexpected failure outside per-item handling.
        """
        try:
            success_count = 0
            failed_count = 0
            failed_images = []
            all_insert_ids = []

            db_type = VECTOR_RETRIEVAL_CONFIG["collection_mapping"].get(self.mm_collection, "milvus")

            # BUGFIX: derive unique INT64 primary keys from one nanosecond
            # timestamp plus the item index — time.time()*1000+idx collides
            # across batches submitted in the same millisecond.
            base_id = time.time_ns()

            # Per-item validation and embedding; Redis caching is deferred until
            # the insert succeeds so a failed batch leaves no stale cache entries.
            batch_data = []
            pending_cache: List[Tuple[str, str]] = []  # (image_id, image_path)
            for idx, image_data in enumerate(images_data):
                try:
                    image_path = image_data.get("image_path")
                    image_desc = image_data.get("image_desc", "")
                    image_id = image_data.get("image_id", str(uuid.uuid4()))

                    # Required fields.
                    if not image_path or not image_desc:
                        failed_count += 1
                        failed_images.append({
                            "image_path": image_path,
                            "reason": "缺少图片路径或描述"
                        })
                        continue

                    # Format check.
                    if not self._validate_image_format(image_path=image_path):
                        failed_count += 1
                        failed_images.append({
                            "image_path": image_path,
                            "reason": "图片格式不支持"
                        })
                        continue

                    # Fused embedding.
                    mm_embedding = self.generate_multi_modal_embedding(image_path=image_path, text=image_desc)
                    if not mm_embedding:
                        failed_count += 1
                        failed_images.append({
                            "image_path": image_path,
                            "reason": "生成多模态嵌入失败"
                        })
                        continue

                    batch_data.append({
                        "id": base_id + idx,
                        "image_id": image_id,
                        "text_desc": image_desc,
                        "mm_embedding": mm_embedding
                    })
                    pending_cache.append((image_id, image_path))

                except Exception as e:
                    failed_count += 1
                    failed_images.append({
                        "image_path": image_data.get("image_path"),
                        "reason": str(e)
                    })

            # Single bulk insert for everything that validated.
            if batch_data:
                try:
                    insert_result = self.vector_router.insert(
                        collection_name=self.mm_collection,
                        data=batch_data,
                        db_type=db_type
                    )
                    success_count = len(batch_data)
                    all_insert_ids = insert_result if isinstance(insert_result, list) else []

                    # Cache image paths only after the insert succeeded.
                    for cached_id, cached_path in pending_cache:
                        self.redis_client.set(
                            key=f"mm:image:{cached_id}",
                            value=cached_path,
                            ex=3600  # 1-hour TTL
                        )
                except Exception as e:
                    logger.error(f"批量插入向量数据库失败: {str(e)}")
                    failed_count += len(batch_data)
                    failed_images.extend([
                        {"image_path": "batch", "reason": f"批量插入失败: {str(e)}"}
                    ])

            logger.info(f"批量添加图片完成: 成功={success_count}, 失败={failed_count}, db_type={db_type}")
            return {
                "total": len(images_data),
                "success_count": success_count,
                "failed_count": failed_count,
                "failed_images": failed_images,
                "insert_ids": all_insert_ids
            }
        except Exception as e:
            logger.error(f"批量添加图片失败: error={str(e)}", exc_info=True)
            raise ToolError(
                message="批量添加图片失败",
                context={"error": str(e)}
            ) from e

    def generate_answer_from_multi_modal_query(self, query: str, image_path: Optional[str] = None, top_k: int = 3) -> Dict[str, Any]:
        """Answer a query using retrieved multi-modal context.

        Returns:
            Dict with ``answer``, ``context``, ``has_relevant_context``
            and ``db_type``.

        Raises:
            ToolError: when embedding, retrieval or generation fails.
        """
        try:
            # 1. Build the query vector (image+text when an image is given).
            if image_path:
                query_embedding = self.generate_multi_modal_embedding(image_path=image_path, text=query)
            else:
                query_embedding = self.generate_multi_modal_embedding(text=query)

            if not query_embedding:
                raise ToolError(
                    message="生成查询嵌入失败",
                    context={"query": query, "has_image": bool(image_path)}
                )

            # 2. Resolve the backend for this collection.
            db_type = VECTOR_RETRIEVAL_CONFIG["collection_mapping"].get(self.mm_collection, "milvus")

            # 3. Similarity search over the multi-modal collection.
            results = self.vector_router.search(
                collection_name=self.mm_collection,
                query_vectors=[query_embedding],
                top_k=top_k,
                metric_type="COSINE",
                db_type=db_type,
                vector_field="mm_embedding"
            )

            # 4. Resolve hits to stored entities and render the context block.
            context_parts = []
            if results and len(results) > 0:
                hit_ids = [hit.id for hit in results[0]]
                if hit_ids:
                    # Batched entity lookup — one round-trip for all hits.
                    entities = self.vector_router.get_entities_by_id(
                        collection_name=self.mm_collection,
                        ids=hit_ids,
                        db_type=db_type
                    )

                    id_to_entity = {entity.get("id"): entity for entity in entities}

                    for hit in results[0]:
                        entity = id_to_entity.get(hit.id)
                        if entity and "text_desc" in entity and "image_id" in entity:
                            text_desc = entity["text_desc"]
                            image_id = entity["image_id"]
                            context_parts.append(
                                f"图片ID: {image_id}\n图片描述: {text_desc}\n相似度: {round(hit.score, 4)}"
                            )

            context = "\n\n".join(context_parts)

            # 5. Choose the prompt; the API parameters are otherwise identical,
            # so build them once instead of duplicating the dict per branch.
            if context_parts:
                prompt = f"基于以下相关信息回答问题:\n\n{context}\n\n问题: {query}\n\n请结合提供的信息给出准确答案。"
            else:
                # No relevant context found — answer directly.
                prompt = f"用户问题: {query}\n请直接回答这个问题。"
            api_params = {
                "model": "gpt-4",
                "messages": [{"role": "user", "content": prompt}],
                "max_tokens": 1024
            }

            # 6. Generate the final answer.
            response = self.api_client.call_openai_api(api_params)
            answer = response["choices"][0]["message"]["content"]

            return {
                "answer": answer,
                "context": context,
                "has_relevant_context": len(context_parts) > 0,
                "db_type": db_type
            }
        except ToolError:
            raise
        except Exception as e:
            logger.error(f"多模态查询答案生成失败: query={query}, error={str(e)}", exc_info=True)
            raise ToolError(
                message="多模态查询答案生成失败",
                context={"query": query, "error": str(e)}
            ) from e


# Shared singleton instance used by importers of this module.
multi_modal_tools = MultiModalTools()

if __name__ == "__main__":
    # Manual smoke test: description generation, embedding and joint QA.
    test_logger = Logger.update_context(task_id="multi_modal_test", agent_name="multi_modal_tool")
    test_logger.info("开始测试多模态工具")

    test_image_path = "test_image.jpg"  # replace with a real image path
    test_query = "这张图片中提到的技术框架是什么？"

    try:
        # 1. Image description via the vision API.
        img_desc = multi_modal_tools.generate_image_description(image_path=test_image_path)
        test_logger.info(f"图片描述: {img_desc}")

        # 2. Fused text+image embedding.
        mm_embedding = multi_modal_tools.generate_multi_modal_embedding(
            text=test_query, image_path=test_image_path
        )
        test_logger.info(f"多模态嵌入维度: {len(mm_embedding)}")

        # 3. Joint multi-modal question answering.
        answer = multi_modal_tools.generate_multi_modal_answer(
            query_text=test_query, query_image_path=test_image_path
        )
        test_logger.info(f"多模态答案: {answer}")

        test_logger.success("多模态工具测试完成")
    except ToolError as e:
        test_logger.error(f"测试失败: {e}")