"""
Knowledge Base Module
知识库构建和管理模块
"""

import json
import logging
import os
import pickle
from typing import Dict, List, Optional, Tuple

import faiss
import numpy as np
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

logger = logging.getLogger(__name__)


class KnowledgeBase:
    """Builds a FAISS-backed vector knowledge base from the COCO dataset.

    Image features are extracted with a CLIP model, L2-normalized, and stored
    in an inner-product FAISS index (inner product over unit vectors equals
    cosine similarity). The image_id -> captions mapping is pickled alongside
    the index.
    """

    def __init__(self, config: dict):
        """
        Initialize the knowledge-base builder.

        Args:
            config: Configuration dictionary. Keys read by this class (nested):
                models.clip_model_path, data.coco_annotations_path,
                data.coco_images_dir, knowledge_base.knowledge_base_path,
                knowledge_base.image_id_to_captions_path.
        """
        self.config = config
        # Model/processor are loaded lazily by load_clip_model().
        self.clip_model = None
        self.processor = None
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def load_clip_model(self):
        """Load the CLIP model and processor onto the selected device.

        Raises:
            Exception: Re-raised after logging if loading fails.
        """
        try:
            model_path = self.config['models']['clip_model_path']
            self.clip_model = CLIPModel.from_pretrained(model_path)
            self.processor = CLIPProcessor.from_pretrained(model_path)
            self.clip_model.to(self.device)
            self.clip_model.eval()  # inference only: disable dropout/batch-norm updates
            logger.info("CLIP模型加载成功，使用设备: %s", self.device)
        except Exception as e:
            logger.error("CLIP模型加载失败: %s", e)
            raise

    def load_coco_annotations(self) -> Dict[int, List[str]]:
        """
        Load the COCO annotation file and build an image_id -> captions mapping.

        Returns:
            Dict[int, List[str]]: Mapping from image id to at most 5 captions.

        Raises:
            Exception: Re-raised after logging (missing file, bad JSON, ...).
        """
        try:
            annotations_path = self.config['data']['coco_annotations_path']
            with open(annotations_path, 'r', encoding='utf-8') as f:
                data = json.load(f)

            # Cap at 5 captions per image while building (single pass,
            # instead of building everything and truncating afterwards).
            image_id_to_captions: Dict[int, List[str]] = {}
            for annotation in data['annotations']:
                captions = image_id_to_captions.setdefault(annotation['image_id'], [])
                if len(captions) < 5:
                    captions.append(annotation['caption'])

            logger.info("加载了 %d 个图像的标注信息", len(image_id_to_captions))
            return image_id_to_captions

        except Exception as e:
            logger.error("加载COCO标注文件失败: %s", e)
            raise

    def extract_image_features(self, image_path: str) -> Optional[np.ndarray]:
        """
        Extract an L2-normalized CLIP feature vector for a single image.

        Args:
            image_path: Path to the image file.

        Returns:
            1-D feature vector, or None when extraction fails. The failure is
            logged and deliberately swallowed so one bad image cannot abort a
            whole build.
        """
        try:
            image = Image.open(image_path).convert('RGB')
            inputs = self.processor(images=image, return_tensors="pt")
            inputs = {k: v.to(self.device) for k, v in inputs.items()}

            with torch.no_grad():
                image_features = self.clip_model.get_image_features(**inputs)
                # Normalize so inner-product search == cosine similarity.
                image_features = image_features / image_features.norm(dim=-1, keepdim=True)
                return image_features.cpu().numpy().flatten()

        except Exception as e:
            logger.error("提取图像特征失败 %s: %s", image_path, e)
            return None

    def _extract_batch_features(self, image_paths: List[str]) -> Tuple[np.ndarray, List[int]]:
        """
        Extract normalized CLIP features for a batch of images in one forward pass.

        Images that fail to load are skipped (and logged) instead of failing
        the whole batch.

        Args:
            image_paths: Paths of the images in this batch.

        Returns:
            Tuple of (features array of shape (k, dim), positional indices
            within ``image_paths`` that were successfully processed).
        """
        images = []
        kept_indices: List[int] = []
        for idx, path in enumerate(image_paths):
            try:
                images.append(Image.open(path).convert('RGB'))
                kept_indices.append(idx)
            except Exception as e:
                logger.error("提取图像特征失败 %s: %s", path, e)

        if not images:
            return np.empty((0, 0), dtype='float32'), []

        inputs = self.processor(images=images, return_tensors="pt")
        inputs = {k: v.to(self.device) for k, v in inputs.items()}
        with torch.no_grad():
            feats = self.clip_model.get_image_features(**inputs)
            feats = feats / feats.norm(dim=-1, keepdim=True)
        return feats.cpu().numpy(), kept_indices

    def build_knowledge_base(self, batch_size: int = 32):
        """
        Build the COCO vector knowledge base and persist it to disk.

        Pipeline: load CLIP -> load annotations -> locate image files ->
        extract features in batches -> build a FAISS inner-product index ->
        write the index plus the id->captions pickle.

        Args:
            batch_size: Number of images per CLIP forward pass. (Previously
                this parameter was accepted but ignored and every image ran
                its own forward pass.)

        Raises:
            ValueError: If no feature vector could be extracted at all.
            Exception: Any other failure is logged and re-raised.
        """
        try:
            self.load_clip_model()
            image_id_to_captions = self.load_coco_annotations()

            # Keep only images that actually exist on disk.
            images_dir = self.config['data']['coco_images_dir']
            image_paths: List[str] = []
            valid_image_ids: List[int] = []
            for image_id in image_id_to_captions:
                # COCO image files are named by zero-padded 12-digit id.
                image_path = os.path.join(images_dir, f"{image_id:012d}.jpg")
                if os.path.exists(image_path):
                    image_paths.append(image_path)
                    valid_image_ids.append(image_id)

            logger.info("找到 %d 个有效图像", len(image_paths))

            features: List[np.ndarray] = []
            valid_captions: Dict[int, List[str]] = {}

            # Batched extraction: one forward pass per batch_size images.
            for start in range(0, len(image_paths), batch_size):
                logger.info("处理进度: %d/%d", start, len(image_paths))
                batch_paths = image_paths[start:start + batch_size]
                batch_ids = valid_image_ids[start:start + batch_size]
                batch_feats, kept = self._extract_batch_features(batch_paths)
                for row, idx in enumerate(kept):
                    features.append(batch_feats[row])
                    image_id = batch_ids[idx]
                    valid_captions[image_id] = image_id_to_captions[image_id]

            if not features:
                raise ValueError("没有成功提取到任何特征向量")

            features_array = np.array(features).astype('float32')
            dimension = features_array.shape[1]

            # Inner product over L2-normalized vectors == cosine similarity.
            index = faiss.IndexFlatIP(dimension)
            index.add(features_array)

            knowledge_base_path = self.config['knowledge_base']['knowledge_base_path']
            captions_path = self.config['knowledge_base']['image_id_to_captions_path']

            # Guard: dirname is '' for bare filenames, and makedirs('') raises.
            for out_path in (knowledge_base_path, captions_path):
                out_dir = os.path.dirname(out_path)
                if out_dir:
                    os.makedirs(out_dir, exist_ok=True)

            faiss.write_index(index, knowledge_base_path)
            with open(captions_path, 'wb') as f:
                pickle.dump(valid_captions, f)

            logger.info("知识库构建完成:")
            logger.info("  - 向量索引: %s", knowledge_base_path)
            logger.info("  - 映射文件: %s", captions_path)
            logger.info("  - 特征维度: %s", dimension)
            logger.info("  - 图像数量: %d", len(features))

        except Exception as e:
            logger.error("构建知识库失败: %s", e)
            raise
