import torch
import logging
from PIL import Image
import numpy as np
from transformers import BertTokenizer, BertModel
import open_clip

logger = logging.getLogger(__name__)

class TaiyiCLIPAdapter:
    """
    Adapter that makes IDEA-CCNL/Taiyi-CLIP-RoBERTa-102M-ViT-L-Chinese
    compatible with the project's CLIP-style interface.

    The Taiyi model pairs a BertModel (Chinese RoBERTa) text encoder with an
    open_clip ViT-L-14 image encoder; this class loads both and exposes
    `get_image_features` / `get_text_features` plus a processor-style
    `__call__`.
    """

    def __init__(self, model_name, device="cuda"):
        """
        Load both encoders and move them to the target device.

        :param model_name: HF model id, e.g.
            'IDEA-CCNL/Taiyi-CLIP-RoBERTa-102M-ViT-L-Chinese'
        :param device: torch device string, e.g. 'cuda' or 'cpu'
        """
        self.model_name = model_name
        self.device = device

        logger.info("加载文本编码器：%s", model_name)
        self.text_tokenizer = BertTokenizer.from_pretrained(model_name)
        self.text_encoder = BertModel.from_pretrained(model_name).to(device)
        self.text_encoder.eval()

        logger.info("加载图像编码器：ViT-L-14")
        # The image tower is the stock OpenAI ViT-L-14; only the text tower
        # comes from the Taiyi checkpoint.
        self.clip_model, _, self.processor = open_clip.create_model_and_transforms(
            'ViT-L-14', pretrained='openai'
        )
        self.clip_model = self.clip_model.to(device)
        self.clip_model.eval()

        logger.info("Taiyi CLIP 适配器加载完成")

    def get_image_features(self, pixel_values):
        """
        Encode preprocessed images into L2-normalized feature vectors.

        :param pixel_values: batched image tensor as produced by `__call__`
        :return: image feature tensor, one unit-norm row per image
        """
        with torch.no_grad():
            image_features = self.clip_model.encode_image(pixel_values)
            # L2-normalize so cosine similarity reduces to a dot product.
            image_features = image_features / image_features.norm(dim=1, keepdim=True)
        return image_features

    def get_text_features(self, input_ids, attention_mask=None):
        """
        Encode tokenized text into L2-normalized feature vectors.

        :param input_ids: token-id tensor from `__call__`
        :param attention_mask: optional mask from `__call__`. Pass it whenever
            the batch was padded: without a mask BertModel attends to [PAD]
            tokens, which skews the pooled output. Defaults to None for
            backward compatibility with existing callers.
        :return: text feature tensor, one unit-norm row per text
        """
        with torch.no_grad():
            # Taiyi-CLIP uses BertModel's pooled [CLS] output (index 1) as the
            # text embedding.
            outputs = self.text_encoder(input_ids, attention_mask=attention_mask)
            text_features = outputs[1]
            # L2-normalize so cosine similarity reduces to a dot product.
            text_features = text_features / text_features.norm(dim=1, keepdim=True)
        return text_features

    def __call__(self, images=None, text=None, return_tensors="pt", padding=True):
        """
        Preprocess raw inputs into model-ready tensors.

        :param images: a PIL image, a numpy array, or a list of either
        :param text: a string or list of strings
        :param return_tensors: tensor format passed to the tokenizer ("pt")
        :param padding: whether the tokenizer pads the batch
        :return: dict with "pixel_values" (if images given) and "input_ids" /
            "attention_mask" (if text given), all on self.device
        :raises TypeError: for unsupported image input types
        """
        result = {}

        if images is not None:
            try:
                if isinstance(images, list):
                    # Normalize every element to PIL, then preprocess
                    # individually (open_clip transforms take one image).
                    pil_images = []
                    for img in images:
                        if isinstance(img, np.ndarray):
                            pil_images.append(Image.fromarray(img.astype('uint8')))
                        elif isinstance(img, Image.Image):
                            pil_images.append(img)
                        else:
                            logger.error(f"不支持的图像类型: {type(img)}")
                            raise TypeError(f"不支持的图像类型: {type(img)}")

                    processed_images = torch.stack(
                        [self.processor(img) for img in pil_images]
                    )
                elif isinstance(images, np.ndarray):
                    # Single numpy array -> batch of one.
                    pil_image = Image.fromarray(images.astype('uint8'))
                    processed_images = self.processor(pil_image).unsqueeze(0)
                elif isinstance(images, Image.Image):
                    # Single PIL image -> batch of one.
                    processed_images = self.processor(images).unsqueeze(0)
                else:
                    logger.error(f"不支持的图像类型: {type(images)}")
                    raise TypeError(f"不支持的图像类型: {type(images)}")

                result["pixel_values"] = processed_images.to(self.device)
            except Exception as e:
                logger.error(f"处理图像时出错: {str(e)}")
                raise

        if text is not None:
            encoded_text = self.text_tokenizer(
                text, return_tensors=return_tensors, padding=padding
            )
            result["input_ids"] = encoded_text["input_ids"].to(self.device)
            # Expose the attention mask so get_text_features can ignore the
            # [PAD] positions introduced by batch padding.
            if "attention_mask" in encoded_text:
                result["attention_mask"] = encoded_text["attention_mask"].to(self.device)

        return result


def create_taiyi_clip_adapter(model_name, device):
    """
    Factory function that builds a Taiyi CLIP adapter.

    :param model_name: HF model id to load
    :param device: torch device string to run on
    :return: a configured TaiyiCLIPAdapter instance
    """
    adapter = TaiyiCLIPAdapter(model_name, device)
    return adapter