import torch
import numpy as np
import os
from PIL import Image
from transformers import (
    CLIPProcessor,
    CLIPModel,
    ChineseCLIPModel,
    ChineseCLIPProcessor,
)
from typing import Optional, Type, Union, List
from utils.logger_config import get_logger

logger = get_logger(__name__)


class BaseClipModelWrapper:
    """Base wrapper for CLIP-family models, encapsulating shared logic.

    Subclasses supply the concrete (model_class, processor_class) pair.
    All embedding helpers return L2-normalized numpy arrays.
    """

    def __init__(
        self,
        model_class: Type[Union[CLIPModel, ChineseCLIPModel]],
        processor_class: Type[Union[CLIPProcessor, ChineseCLIPProcessor]],
        model_name: str,
        device: Optional[str] = None,
        local_model_path: Optional[str] = None,
        offline_mode: bool = False,
    ):
        """
        Initialize the model and processor.
        :param model_class: CLIP or ChineseCLIP model class.
        :param processor_class: CLIP or ChineseCLIP processor class.
        :param model_name: Hub model name (used when no valid local path).
        :param device: Compute device; "auto" or None picks CUDA if available.
        :param local_model_path: Optional local model directory.
        :param offline_mode: If True, load from local files only.
        :raises RuntimeError: if loading fails (and, when online, the
            fallback model also fails to load).
        """
        if device == "auto" or device is None:
            self.device = "cuda" if torch.cuda.is_available() else "cpu"
        else:
            self.device = device
        self.model_name = model_name
        self.offline_mode = offline_mode

        # Prefer an existing local path; otherwise fall back to the hub name.
        model_path = (
            local_model_path
            if local_model_path and os.path.exists(local_model_path)
            else model_name
        )

        try:
            logger.info(f"正在加载模型: {model_path}")

            if offline_mode and not os.path.exists(model_path):
                raise FileNotFoundError(f"离线模式下未找到本地模型: {model_path}")

            use_safetensors_param = self._prefer_safetensors(str(model_path))
            logger.info(f"使用文件格式: {'safetensors' if use_safetensors_param else 'pytorch (.bin)'}")

            # The guard above guarantees the path exists in offline mode, so
            # local_files_only can simply mirror offline_mode. (The previous
            # `offline_mode and os.path.exists(model_path)` could silently
            # fall back to network access in offline mode.)
            self.model = model_class.from_pretrained(
                model_path,
                use_safetensors=use_safetensors_param,
                local_files_only=offline_mode,
            ).to(self.device)

            # Processors carry no weight files, so use_safetensors does not
            # apply to them; the previous no-op kwarg has been dropped.
            self.processor = processor_class.from_pretrained(
                model_path,
                local_files_only=offline_mode,
            )

            self.embedding_dim = self.model.config.projection_dim
            logger.info(f"模型加载成功，嵌入维度: {self.embedding_dim}")

        except Exception as e:
            error_msg = f"加载模型失败: {str(e)}"
            logger.error(error_msg)

            if offline_mode:
                raise RuntimeError(f"离线模式下{error_msg}。请确保本地模型文件存在且路径正确。")
            else:
                # Online mode: fall back to a known-good public model.
                try:
                    logger.warning("尝试使用备用模型...")
                    fallback_model = "openai/clip-vit-base-patch32"
                    self.model = model_class.from_pretrained(fallback_model, use_safetensors=True).to(self.device)
                    self.processor = processor_class.from_pretrained(fallback_model)
                    self.embedding_dim = self.model.config.projection_dim
                    logger.info(f"备用模型加载成功: {fallback_model}")
                except Exception as fallback_error:
                    logger.error(f"备用模型也加载失败: {str(fallback_error)}")
                    raise RuntimeError(f"{error_msg}。备用模型也加载失败: {str(fallback_error)}")

    @staticmethod
    def _prefer_safetensors(model_path_str: str) -> bool:
        """Decide the weight format for a model path.

        Returns True to load safetensors (preferred: safer format), False to
        load pytorch .bin weights. Defaults to True when neither file is
        found (e.g. when model_path is a hub id rather than a directory).
        """
        def _exists_any(*names: str) -> bool:
            return any(os.path.exists(os.path.join(model_path_str, n)) for n in names)

        if _exists_any("model.safetensors", "model.safetensors.index.json"):
            logger.info("检测到model.safetensors文件，优先使用safetensors格式加载模型（更安全）")
            return True
        if _exists_any("pytorch_model.bin", "pytorch_model.bin.index.json"):
            logger.info("检测到pytorch文件，使用pytorch (.bin)格式加载模型")
            return False
        logger.info("未检测到特定格式文件，使用默认格式加载模型")
        return True

    @staticmethod
    def _normalize_l2(embeddings: np.ndarray) -> np.ndarray:
        """L2-normalize a single vector or a batch of row vectors."""
        axis = 1 if len(embeddings.shape) > 1 else 0
        norms = np.linalg.norm(embeddings, axis=axis, keepdims=True)
        norms[norms == 0] = 1.0  # avoid division by zero for all-zero vectors
        return embeddings / norms

    def _process_and_embed(
        self, processor_input: dict, feature_extractor
    ) -> np.ndarray:
        """Shared pipeline: run the processor, extract features, L2-normalize."""
        logger.debug(f"处理输入: {processor_input}")
        inputs = self.processor(
            **processor_input, return_tensors="pt", padding=True
        ).to(self.device)
        logger.debug(f"输入张量形状: {inputs}")

        with torch.no_grad():
            raw_embeddings = feature_extractor(**inputs).cpu().numpy()

        logger.debug(f"原始嵌入形状: {raw_embeddings.shape}")
        normalized_embeddings = self._normalize_l2(raw_embeddings)
        logger.debug(f"归一化嵌入形状: {normalized_embeddings.shape}")
        return normalized_embeddings

    def image_to_embedding(self, image_path: str) -> np.ndarray:
        """Extract an L2-normalized embedding for a single image file."""
        try:
            image = Image.open(image_path).convert("RGB")
            # Route through the shared pipeline (previously duplicated inline
            # with an unreachable None check on the resulting ndarray).
            return self._process_and_embed(
                {"images": image}, self.model.get_image_features
            ).squeeze()
        except Exception as e:
            raise RuntimeError(f"图像向量提取失败：{str(e)}")

    def text_to_embedding(self, text: str) -> np.ndarray:
        """Generate an L2-normalized embedding for a single text."""
        try:
            # NOTE(review): the previous code bypassed get_text_features and
            # manually pooled the CLS token, which disagrees with standard
            # CLIP's EOS pooling and with batch_text_to_embeddings — the same
            # text produced different vectors in single vs. batch calls.
            # Unified on get_text_features for consistency.
            return self._process_and_embed(
                {"text": [text]}, self.model.get_text_features
            ).squeeze()
        except Exception as e:
            logger.error(f"文本向量生成失败: {str(e)}")
            logger.error(f"错误类型: {type(e).__name__}")
            import traceback
            logger.error(f"完整错误堆栈: {traceback.format_exc()}")
            raise RuntimeError(f"文本向量生成失败：{str(e)}")

    def batch_image_to_embeddings(self, image_paths: List[str]) -> np.ndarray:
        """Extract L2-normalized embeddings for a batch of image files."""
        try:
            images = [Image.open(p).convert("RGB") for p in image_paths]
            return self._process_and_embed(
                {"images": images}, self.model.get_image_features
            )
        except Exception as e:
            raise RuntimeError(f"批量图像向量提取失败：{str(e)}")

    def batch_text_to_embeddings(self, texts: List[str]) -> np.ndarray:
        """Generate L2-normalized embeddings for a batch of texts."""
        try:
            return self._process_and_embed(
                {"text": texts}, self.model.get_text_features
            )
        except Exception as e:
            raise RuntimeError(f"批量文本向量生成失败：{str(e)}")

    @staticmethod
    def cosine_similarity(vec1: np.ndarray, vec2: np.ndarray) -> float:
        """Cosine similarity of two vectors, clamped to [0.0, 1.0].

        Inputs are normalized internally; a zero vector yields 0.0.
        Note the clamp discards negative similarity — preserved for
        backward compatibility with existing callers.
        :raises RuntimeError: on shape mismatch or any computation failure.
        """
        try:
            if vec1.shape != vec2.shape:
                raise ValueError("输入向量维度不匹配")

            norm1 = np.linalg.norm(vec1)
            norm2 = np.linalg.norm(vec2)

            if norm1 == 0 or norm2 == 0:
                return 0.0

            similarity = np.dot(vec1 / norm1, vec2 / norm2)
            return max(0.0, min(1.0, similarity))

        except Exception as e:
            raise RuntimeError(f"相似度计算失败: {str(e)}")

    def compute_similarity(
        self, embedding1: np.ndarray, embedding2: np.ndarray, metric: str = "cosine"
    ) -> float:
        """Compute embedding similarity via the shared numpy utility."""
        # Imported lazily to avoid a module-level dependency cycle.
        from utils.numpy_utils import calculate_similarity

        return calculate_similarity(embedding1, embedding2, metric=metric)


class ClipModelWrapper(BaseClipModelWrapper):
    """Wrapper for the standard (OpenAI) CLIP model family."""

    def __init__(
        self,
        model_name: str = "openai/clip-vit-base-patch32",
        device: Optional[str] = None,
        local_model_path: Optional[str] = None,
        offline_mode: bool = False,
    ):
        # Delegate to the base wrapper, binding the CLIP-specific classes.
        super().__init__(
            model_class=CLIPModel,
            processor_class=CLIPProcessor,
            model_name=model_name,
            device=device,
            local_model_path=local_model_path,
            offline_mode=offline_mode,
        )


class ChineseClipModelWrapper(BaseClipModelWrapper):
    """Wrapper for the Chinese CLIP model family."""

    def __init__(
        self,
        model_name: str = "OFA-Sys/chinese-clip-vit-base-patch16",
        device: Optional[str] = None,
        local_model_path: Optional[str] = None,
        offline_mode: bool = False,
    ):
        # Delegate to the base wrapper, binding the ChineseCLIP classes.
        super().__init__(
            model_class=ChineseCLIPModel,
            processor_class=ChineseCLIPProcessor,
            model_name=model_name,
            device=device,
            local_model_path=local_model_path,
            offline_mode=offline_mode,
        )
