from typing import List
import torch
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from agentscope.model import ChatModelBase
from agentscope.embedding import EmbeddingModelBase

class SentenceTransformerEmbedding(EmbeddingModelBase):
    """Embedding model backed by a local SentenceTransformer encoder.

    The encoder is pinned to the CPU to conserve GPU memory, and inputs
    are encoded in fixed-size batches to bound peak memory usage.
    """

    def __init__(self, model_name: str = "BAAI/bge-small-zh"):
        """Load the SentenceTransformer and move it to the CPU.

        Args:
            model_name: Hugging Face model id of the sentence encoder.
        """
        self.model = SentenceTransformer(model_name)
        # Run the small encoder on CPU to save GPU memory.
        self.model = self.model.to("cpu")

    async def embed_texts(self, texts: List[str]) -> List[List[float]]:
        """Encode ``texts`` into embedding vectors.

        Args:
            texts: Input strings to embed. An empty list yields an
                empty result.

        Returns:
            One embedding (list of floats) per input text, in order.
        """
        # Encode in batches to keep peak memory bounded.
        batch_size = 32
        all_embeddings: List[List[float]] = []

        for start in range(0, len(texts), batch_size):
            batch = texts[start:start + batch_size]
            # convert_to_numpy avoids the tensor -> numpy -> list round
            # trip of the original code; calling .numpy() on the encoded
            # tensor would also fail if it lived on a CUDA device or
            # required grad, whereas encode() returns a plain ndarray here.
            vectors = self.model.encode(
                batch,
                convert_to_numpy=True,
                show_progress_bar=False,
            )
            all_embeddings.extend(vectors.tolist())

        return all_embeddings

class ChatGLMModel(ChatModelBase):
    """Chat model backed by the quantized ChatGLM2-6B-int4 checkpoint."""

    def __init__(self, model_name: str = "THUDM/chatglm2-6b-int4"):
        """Load the tokenizer and model, offloading some modules to CPU.

        Args:
            model_name: Hugging Face model id of the chat checkpoint.
        """
        # Modules kept on CPU alongside int4 quantization to reduce
        # GPU memory usage.
        offload_map = {
            "transformer.word_embeddings": "cpu",
            "transformer.final_layernorm": "cpu",
            "transformer.prefix_encoder": "cpu",
            "lm_head": "cpu",
        }
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            trust_remote_code=True,
            revision="v1.0",
        )
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name,
            trust_remote_code=True,
            revision="v1.0",
            torch_dtype=torch.float16,
            device_map=offload_map,
        )

    async def chat(self, messages: List[dict]) -> str:
        """Generate a reply for a conversation.

        Args:
            messages: Conversation turns, each a dict with "role" and
                "content" keys.

        Returns:
            The model's reply with surrounding whitespace stripped.
        """
        # Flatten the conversation into a single "role: content" prompt.
        turns = [f"{msg['role']}: {msg['content']}" for msg in messages]
        prompt = "\n".join(turns)

        # Use the model's own chat() helper instead of a transformers
        # pipeline to keep memory usage down; max_length is kept small
        # for the same reason.
        reply, _ = self.model.chat(
            self.tokenizer,
            prompt,
            max_length=1024,
            temperature=0.7,
            top_p=0.9,
        )

        return reply.strip()

# Module-level singleton instances shared by importers.
# NOTE(review): instantiating here is a heavy import-time side effect —
# both model checkpoints are downloaded/loaded as soon as this module is
# imported. Confirm callers rely on these names before making this lazy.
embedding_model = SentenceTransformerEmbedding()
chat_model = ChatGLMModel()
