# First install the required libraries:
# pip install transformers torch numpy

from transformers import AutoModel, AutoTokenizer
import torch
import numpy as np
import json

class BGEM3Embedder:
    """Generate dense, sparse and multi-vector (ColBERT-style) embeddings
    with a BGE-M3 checkpoint loaded through Hugging Face transformers."""

    def __init__(self, model_name="D:/ideaSpace/MyPython/models/bge-m3"):
        """
        Initialize the BGE-M3 embedding generator.

        Parameters:
            model_name: local path or hub id of the BGE-M3 checkpoint.
        """
        self.model_name = model_name
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModel.from_pretrained(model_name, trust_remote_code=True)

        # Run on GPU when available, otherwise fall back to CPU.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)
        self.model.eval()

        print(f"模型加载完成，使用设备: {self.device}")
        print(f"模型架构: {type(self.model).__name__}")

    def get_embeddings(self, texts, max_length=512):
        """
        Produce three embedding types for *texts*.

        Returns a dict with:
            'dense':          (batch, hidden) numpy, L2-normalized [CLS] states
            'sparse':         (batch, seq) CPU torch tensor of token weights
            'colbert':        (batch, seq, hidden) numpy, per-token normalized
            'input_ids':      (batch, seq) numpy
            'attention_mask': (batch, seq) numpy
        """
        # Tokenize the batch (offsets mapping intentionally not requested).
        encoded_input = self.tokenizer(
            texts,
            padding=True,
            truncation=True,
            return_tensors='pt',
            max_length=max_length
        )

        # Move every input tensor to the model's device.
        encoded_input = {k: v.to(self.device) for k, v in encoded_input.items()}

        with torch.no_grad():
            outputs = self.model(**encoded_input, return_dict=True)

            # The [CLS] token hidden state serves as the sentence vector.
            dense_embeddings = outputs.last_hidden_state[:, 0]

            # Use the model's sparse output when it exposes one; otherwise
            # fall back to a stand-in so the rest of the pipeline still runs.
            if hasattr(outputs, 'sparse_embeddings'):
                sparse_embeddings = outputs.sparse_embeddings
            else:
                sparse_embeddings = self._create_dummy_sparse_embeddings(encoded_input['input_ids'])

            # L2-normalize the dense sentence vectors.
            dense_embeddings = torch.nn.functional.normalize(dense_embeddings, p=2, dim=1)

            # Multi-vector (ColBERT-style) embeddings: every token state,
            # normalized along the hidden dimension.
            colbert_embeddings = outputs.last_hidden_state
            colbert_embeddings = torch.nn.functional.normalize(colbert_embeddings, p=2, dim=2)

        return {
            'dense': dense_embeddings.cpu().numpy(),
            'sparse': sparse_embeddings.cpu(),
            'colbert': colbert_embeddings.cpu().numpy(),
            'input_ids': encoded_input['input_ids'].cpu().numpy(),
            'attention_mask': encoded_input['attention_mask'].cpu().numpy()
        }

    def _create_dummy_sparse_embeddings(self, input_ids):
        """
        Create stand-in sparse weights (for testing): 0.5 for real tokens,
        0.1 for padding.
        """
        sparse_weights = torch.ones_like(input_ids, dtype=torch.float32) * 0.1
        pad_id = self.tokenizer.pad_token_id
        # Fix: guard against tokenizers without a pad token — the original
        # compared token ids against None.
        if pad_id is not None:
            sparse_weights[input_ids != pad_id] = 0.5
        else:
            sparse_weights.fill_(0.5)
        return sparse_weights

    def process_sparse_embeddings(self, sparse_embeddings, input_ids, top_k=10):
        """
        Extract the top_k highest-weighted vocabulary tokens per sample.

        Accepts torch tensors or numpy arrays for both *sparse_embeddings*
        (batch, seq weights) and *input_ids*; both are normalized to torch
        tensors up front. (The original mixed a torch weights tensor with
        numpy input ids and relied on fragile cross-library indexing.)

        Returns a list with one entry per sample: a list of dicts with
        'token', 'weight' and 'token_id', sorted by weight descending.
        Padding / CLS / SEP tokens are excluded.
        """
        weights_batch = torch.as_tensor(sparse_embeddings)
        ids_batch = torch.as_tensor(input_ids)

        # Special token ids to exclude; some tokenizers define only a
        # subset of these, so drop the Nones.
        special_ids = [tid for tid in (self.tokenizer.pad_token_id,
                                       self.tokenizer.cls_token_id,
                                       self.tokenizer.sep_token_id)
                       if tid is not None]

        results = []
        for weights, token_ids in zip(weights_batch, ids_batch):
            valid = torch.ones_like(token_ids, dtype=torch.bool)
            for tid in special_ids:
                valid &= token_ids != tid
            valid_weights = weights[valid]
            valid_token_ids = token_ids[valid]

            if len(valid_weights) == 0:
                results.append([])
                continue

            # topk with the default sorted=True already returns weights in
            # descending order, so no extra sort pass is needed.
            top = torch.topk(valid_weights, k=min(top_k, len(valid_weights)))

            sparse_info = []
            for weight, idx in zip(top.values, top.indices):
                token_id = valid_token_ids[idx].item()
                token = self.tokenizer.decode([token_id])
                sparse_info.append({
                    'token': token.strip(),
                    'weight': weight.item(),
                    'token_id': token_id
                })
            results.append(sparse_info)

        return results

    def calculate_dense_similarity(self, emb1, emb2):
        """Cosine-similarity matrix between two sets of dense embeddings
        (vectors are assumed to be L2-normalized already)."""
        return np.dot(emb1, emb2.T)

    def calculate_sparse_similarity(self, sparse_list1, sparse_list2):
        """
        Similarity between sparse representations: for each pair, the sum
        over shared tokens of the product of their weights (a sparse dot
        product).
        """
        similarities = []
        for sparse1 in sparse_list1:
            # Hoisted out of the inner loop: this map only depends on sparse1.
            weights1 = {item['token']: item['weight'] for item in sparse1}
            row_sims = []
            for sparse2 in sparse_list2:
                weights2 = {item['token']: item['weight'] for item in sparse2}
                shared_tokens = set(weights1) & set(weights2)
                row_sims.append(sum(weights1[t] * weights2[t] for t in shared_tokens))
            similarities.append(row_sims)
        return np.array(similarities)

    def calculate_colbert_similarity(self, colbert_embs1, colbert_embs2, attention_masks1, attention_masks2):
        """
        Similarity between multi-vector embeddings: for each pair, the mean
        over query tokens of the max similarity against any document token
        (a simplified ColBERT MaxSim score). Padding positions are masked out.
        """
        similarities = []
        for i, emb1 in enumerate(colbert_embs1):
            row_sims = []
            # Drop padding positions for the left-hand sample.
            emb1_valid = emb1[attention_masks1[i] == 1]

            for j, emb2 in enumerate(colbert_embs2):
                emb2_valid = emb2[attention_masks2[j] == 1]

                if len(emb1_valid) > 0 and len(emb2_valid) > 0:
                    # All token-pair similarities, then max per row, then mean.
                    sim_matrix = np.dot(emb1_valid, emb2_valid.T)
                    row_sims.append(np.mean(np.max(sim_matrix, axis=1)))
                else:
                    row_sims.append(0.0)
            similarities.append(row_sims)
        return np.array(similarities)

def main():
    """Demo driver: embed sample sentences, show the three embedding
    types, compare their similarity matrices, and save the results."""
    # Build the embedder (loads the model; may take a while).
    embedder = BGEM3Embedder()

    # Sample sentences, kept simple so the demo runs reliably.
    texts = [
        "machine learning is important",
        "deep learning uses neural networks",
        "good weather today",
        "natural language processing"
    ]

    print("生成嵌入中...")
    try:
        # All three embedding types in one forward pass.
        embeddings = embedder.get_embeddings(texts)

        # Top-weighted vocabulary tokens per sentence.
        sparse_results = embedder.process_sparse_embeddings(
            embeddings['sparse'], embeddings['input_ids'], top_k=5
        )

        banner = "=" * 50

        print("\n" + banner)
        print("BGE-M3 多类型嵌入生成结果")
        print(banner)

        for idx, sentence in enumerate(texts):
            print(f"\n文本 {idx+1}: {sentence}")
            print(f"密集嵌入形状: {embeddings['dense'][idx].shape}")
            print(f"多向量嵌入形状: {embeddings['colbert'][idx].shape}")

            entries = sparse_results[idx]
            if not entries:
                print("无有效的稀疏嵌入信息")
            else:
                print("最重要的词汇权重（稀疏嵌入）:")
                # Show only the three heaviest tokens.
                for rank, entry in enumerate(entries[:3], start=1):
                    print(f"  {rank}. '{entry['token']}': {entry['weight']:.4f}")

        print("\n" + banner)
        print("相似度矩阵比较")
        print(banner)

        # Dense (cosine) similarity.
        dense_sim = embedder.calculate_dense_similarity(embeddings['dense'], embeddings['dense'])
        print("\n密集嵌入相似度矩阵:")
        print(np.round(dense_sim, 3))

        # Sparse (shared-token) similarity.
        sparse_sim = embedder.calculate_sparse_similarity(sparse_results, sparse_results)
        print("\n稀疏嵌入相似度矩阵:")
        print(np.round(sparse_sim, 3))

        # Multi-vector (MaxSim) similarity.
        colbert_sim = embedder.calculate_colbert_similarity(
            embeddings['colbert'], embeddings['colbert'],
            embeddings['attention_mask'], embeddings['attention_mask']
        )
        print("\n多向量嵌入相似度矩阵:")
        print(np.round(colbert_sim, 3))

        # Persist everything as JSON.
        payload = {
            'texts': texts,
            'dense_embeddings': [vec.tolist() for vec in embeddings['dense']],
            'sparse_embeddings': sparse_results,
            'colbert_embeddings': [vec.tolist() for vec in embeddings['colbert']]
        }
        with open('bge_m3_embeddings.json', 'w', encoding='utf-8') as f:
            json.dump(payload, f, ensure_ascii=False, indent=2)

        print(f"\n结果已保存到 'bge_m3_embeddings.json'")

    except Exception as err:
        # Best-effort fallback: report and run the minimal demo instead.
        print(f"发生错误: {err}")
        print("尝试简化模型调用...")
        simplified_example(embedder)

def simplified_example(embedder):
    """Minimal fallback demo: embed a single sentence and print the
    normalized dense vector's shape and first values."""
    print("\n运行简化示例...")

    sample = "machine learning"
    batch = embedder.tokenizer(sample, return_tensors='pt').to(embedder.device)

    with torch.no_grad():
        outputs = embedder.model(**batch, return_dict=True)
        # [CLS] state, L2-normalized, as the sentence embedding.
        vec = torch.nn.functional.normalize(
            outputs.last_hidden_state[:, 0], p=2, dim=1
        )

        print(f"文本: {sample}")
        print(f"密集嵌入形状: {vec.shape}")
        print(f"密集嵌入前5个值: {vec[0][:5].cpu().numpy()}")

# Script entry point: run the full demo.
if __name__ == "__main__":
    main()