#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Qwen3-Embedding-8B-GGUF 本地向量化器
专门为本地部署的Qwen3向量化模型设计的高效向量化工具

Author: AI Assistant
Date: 2025-01-16
"""

import os
import sys
import time
import numpy as np
from typing import List, Dict, Optional, Union, Tuple
import json
import logging
from pathlib import Path

# Configure logging: INFO level with timestamped, leveled messages.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Module-level logger used by the vectorizer class below.
logger = logging.getLogger(__name__)

class Qwen3LocalVectorizer:
    """Local text vectorizer backed by a Qwen3-Embedding-8B GGUF model.

    Wraps ``llama-cpp-python`` in embedding mode and provides single and
    batch text encoding, cosine similarity, semantic search, a bounded
    FIFO vector cache, runtime statistics, and JSON persistence of
    computed vectors.
    """

    def __init__(self, model_path: Optional[str] = None, n_ctx: int = 2048,
                 n_threads: Optional[int] = None):
        """
        Initialize the vectorizer and eagerly load the model.

        Args:
            model_path: Path to the GGUF model file; the built-in default
                location is used when None.
            n_ctx: Context window length passed to llama.cpp.
            n_threads: Worker thread count; autodetected via
                ``os.cpu_count()`` when None.
        """
        # Default model path (local modelscope cache).
        self.default_model_path = "/Users/baimu/.cache/modelscope/hub/models/Qwen/Qwen3-Embedding-8B-GGUF/Qwen3-Embedding-8B-Q8_0.gguf"

        self.model_path = model_path or self.default_model_path
        self.n_ctx = n_ctx
        self.n_threads = n_threads or os.cpu_count()

        self.model = None            # llama_cpp.Llama instance once loaded
        self.is_loaded = False       # set True by load_model()
        self.vector_dimension = None  # embedding size, discovered at load time

        # Performance counters, exposed through get_stats().
        self.stats = {
            'total_texts_encoded': 0,
            'total_encoding_time': 0.0,
            'average_encoding_time': 0.0,
            'cache_hits': 0,
            'cache_misses': 0
        }

        # Bounded FIFO cache mapping text -> vector.
        self.vector_cache = {}
        self.max_cache_size = 10000

        logger.info("初始化Qwen3向量化器")
        logger.info(f"模型路径: {self.model_path}")
        logger.info(f"上下文长度: {self.n_ctx}")
        logger.info(f"线程数: {self.n_threads}")

        # Load the model eagerly so the instance is ready to encode.
        self.load_model()

    def load_model(self) -> bool:
        """
        Load the GGUF model via llama-cpp-python.

        Returns:
            bool: True if the model file was found and the Llama object
            was constructed; False on missing file, missing dependency,
            or constructor failure. A failed post-load embedding test
            still returns True (the model object itself loaded) and
            falls back to the default 4096 dimension.
        """
        try:
            # Fail fast if the model file is absent.
            if not os.path.exists(self.model_path):
                logger.error(f"模型文件不存在: {self.model_path}")
                return False

            # Report file size for diagnostics.
            file_size = os.path.getsize(self.model_path) / (1024 * 1024 * 1024)  # GB
            logger.info(f"模型文件大小: {file_size:.2f} GB")

            # Import lazily so the class is importable without the dependency.
            try:
                from llama_cpp import Llama
                logger.info("成功导入 llama-cpp-python")
            except ImportError:
                logger.error("需要安装 llama-cpp-python: pip install llama-cpp-python")
                return False

            logger.info("正在加载Qwen3-Embedding模型...")
            start_time = time.time()

            self.model = Llama(
                model_path=self.model_path,
                n_ctx=self.n_ctx,
                n_threads=self.n_threads,
                embedding=True,  # enable embedding mode
                verbose=False,   # suppress llama.cpp console output
                n_gpu_layers=0   # CPU-only; raise if a GPU is available
            )

            load_time = time.time() - start_time
            logger.info(f"模型加载完成，耗时: {load_time:.2f}秒")

            # Smoke-test embedding to discover the vector dimension.
            # NOTE(review): some llama-cpp-python versions return a list of
            # per-token lists from embed() — confirm against the installed
            # version if dimensions look wrong.
            try:
                test_embedding = self.model.embed("测试")
                if test_embedding and len(test_embedding) > 0:
                    self.vector_dimension = len(test_embedding)
                    logger.info(f"向量维度: {self.vector_dimension}")
                    self.is_loaded = True
                    return True
                else:
                    logger.error("模型加载后测试向量化失败")
                    # The model object loaded; mark usable with the
                    # documented default dimension for Qwen3-Embedding.
                    self.is_loaded = True
                    self.vector_dimension = 4096
                    logger.info(f"使用默认向量维度: {self.vector_dimension}")
                    return True
            except Exception as e:
                logger.error(f"测试向量化时出错: {str(e)}")
                # Same fallback: the model itself loaded successfully.
                self.is_loaded = True
                self.vector_dimension = 4096
                logger.info(f"使用默认向量维度: {self.vector_dimension}")
                return True

        except Exception as e:
            logger.error(f"模型加载失败: {str(e)}")
            return False

    def _encode_single_text(self, text: str) -> Optional[np.ndarray]:
        """
        Encode one text with the underlying model (no caching, no stats).

        Args:
            text: Input text; leading/trailing whitespace is stripped.

        Returns:
            Optional[np.ndarray]: float32 vector, or None when the model
            is not loaded, the text is empty, or embedding fails.
        """
        if not self.is_loaded:
            logger.error("模型未正确加载")
            return None

        if not text or not text.strip():
            logger.warning("输入文本为空")
            return None

        try:
            # llama.cpp may print a warning on the first call; harmless.
            embedding = self.model.embed(text.strip())
            if embedding and len(embedding) > 0:
                return np.array(embedding, dtype=np.float32)
            else:
                logger.error("向量化返回空结果")
                return None

        except Exception as e:
            logger.error(f"文本向量化失败: {str(e)}")
            return None

    def encode_text(self, text: str, use_cache: bool = True) -> Optional[np.ndarray]:
        """
        Encode a single text, optionally consulting/updating the cache.

        Args:
            text: Input text.
            use_cache: Whether to read from and write to the vector cache.

        Returns:
            Optional[np.ndarray]: float32 vector (a copy when served from
            cache), or None on empty input / encoding failure.
        """
        if not text or not text.strip():
            return None

        text = text.strip()

        # Cache lookup: return a copy so callers cannot mutate the cache.
        if use_cache and text in self.vector_cache:
            self.stats['cache_hits'] += 1
            return self.vector_cache[text].copy()

        # Count the miss at lookup time so the hit rate also reflects
        # failed encodings (previously misses were only counted on success).
        self.stats['cache_misses'] += 1

        start_time = time.time()
        vector = self._encode_single_text(text)
        encoding_time = time.time() - start_time

        if vector is not None:
            # Update timing statistics only for successful encodes.
            self.stats['total_texts_encoded'] += 1
            self.stats['total_encoding_time'] += encoding_time
            self.stats['average_encoding_time'] = (
                self.stats['total_encoding_time'] / self.stats['total_texts_encoded']
            )

            if use_cache:
                self._add_to_cache(text, vector)

            logger.debug(f"文本编码完成，耗时: {encoding_time:.3f}秒")

        return vector

    def encode_batch(self, texts: List[str], batch_size: int = 32,
                    use_cache: bool = True, show_progress: bool = True) -> List[Optional[np.ndarray]]:
        """
        Encode a list of texts sequentially.

        Args:
            texts: Texts to encode.
            batch_size: Progress-reporting interval (texts are encoded
                one by one; this does not batch model calls).
            use_cache: Whether to use the vector cache per text.
            show_progress: Whether to log progress/ETA.

        Returns:
            List[Optional[np.ndarray]]: One entry per input text, None
            for texts that failed to encode.
        """
        if not texts:
            return []

        logger.info(f"开始批量编码 {len(texts)} 个文本")
        start_time = time.time()

        vectors = []
        processed_count = 0

        for i, text in enumerate(texts):
            vector = self.encode_text(text, use_cache=use_cache)
            vectors.append(vector)
            processed_count += 1

            # Log progress every `batch_size` items and at the end.
            if show_progress and (processed_count % batch_size == 0 or processed_count == len(texts)):
                progress = processed_count / len(texts) * 100
                elapsed_time = time.time() - start_time
                avg_time = elapsed_time / processed_count
                eta = avg_time * (len(texts) - processed_count)

                logger.info(f"进度: {processed_count}/{len(texts)} ({progress:.1f}%) "
                          f"已用时: {elapsed_time:.1f}s 预计剩余: {eta:.1f}s")

        total_time = time.time() - start_time
        logger.info(f"批量编码完成，总耗时: {total_time:.2f}秒，平均每个: {total_time/len(texts):.3f}秒")

        return vectors

    def compute_similarity(self, text1: str, text2: str) -> float:
        """
        Cosine similarity between two texts, clipped to [0, 1].

        Args:
            text1: First text.
            text2: Second text.

        Returns:
            float: Clipped cosine similarity; 0.0 when either text fails
            to encode or has a zero-norm vector.
        """
        vec1 = self.encode_text(text1)
        vec2 = self.encode_text(text2)

        if vec1 is None or vec2 is None:
            return 0.0

        dot_product = np.dot(vec1, vec2)
        norm1 = np.linalg.norm(vec1)
        norm2 = np.linalg.norm(vec2)

        # Guard against division by zero for degenerate vectors.
        if norm1 == 0 or norm2 == 0:
            return 0.0

        similarity = dot_product / (norm1 * norm2)
        return float(np.clip(similarity, 0.0, 1.0))

    def find_most_similar(self, query_text: str, candidate_texts: List[str],
                         top_k: int = 5) -> List[Dict[str, Union[str, float, int]]]:
        """
        Rank candidate texts by cosine similarity to a query.

        Args:
            query_text: The query text.
            candidate_texts: Candidate texts to rank.
            top_k: Number of top results to return.

        Returns:
            List[Dict]: Up to ``top_k`` dicts with keys 'text',
            'similarity' (clipped to [0, 1]) and 'index' (position in
            ``candidate_texts``), sorted by descending similarity.
            Candidates that fail to encode or have zero-norm vectors
            are skipped.
        """
        if not candidate_texts:
            return []

        logger.info(f"搜索与 '{query_text[:50]}...' 最相似的 {top_k} 个文本")

        query_vector = self.encode_text(query_text)
        if query_vector is None:
            logger.error("查询文本编码失败")
            return []

        # Hoist the query norm out of the loop; bail out on a degenerate
        # query (was previously a division by zero / NaN).
        query_norm = np.linalg.norm(query_vector)
        if query_norm == 0:
            return []

        candidate_vectors = self.encode_batch(candidate_texts, show_progress=False)

        similarities = []
        for i, candidate_vector in enumerate(candidate_vectors):
            if candidate_vector is None:
                continue
            candidate_norm = np.linalg.norm(candidate_vector)
            if candidate_norm == 0:
                # Skip zero vectors instead of dividing by zero.
                continue

            similarity = np.dot(query_vector, candidate_vector) / (query_norm * candidate_norm)
            similarities.append({
                'text': candidate_texts[i],
                'similarity': float(np.clip(similarity, 0.0, 1.0)),
                'index': i
            })

        # Sort by similarity and keep the top k.
        similarities.sort(key=lambda x: x['similarity'], reverse=True)
        return similarities[:top_k]

    def _add_to_cache(self, text: str, vector: np.ndarray) -> None:
        """
        Insert a vector into the cache, evicting FIFO when full.

        Args:
            text: Cache key.
            vector: Vector to store (a copy is kept).
        """
        if len(self.vector_cache) >= self.max_cache_size:
            # Dicts preserve insertion order, so the first key is the oldest.
            oldest_key = next(iter(self.vector_cache))
            del self.vector_cache[oldest_key]

        self.vector_cache[text] = vector.copy()

    def clear_cache(self) -> None:
        """Empty the vector cache."""
        self.vector_cache.clear()
        logger.info("向量缓存已清空")

    def get_stats(self) -> Dict:
        """
        Snapshot of model state and performance counters.

        Returns:
            Dict: Load state, model path, vector dimension, encode
            counts/timings, cache size and hit-rate statistics.
        """
        cache_hit_rate = 0.0
        total_lookups = self.stats['cache_hits'] + self.stats['cache_misses']
        if total_lookups > 0:
            cache_hit_rate = self.stats['cache_hits'] / total_lookups

        return {
            'model_loaded': self.is_loaded,
            'model_path': self.model_path,
            'vector_dimension': self.vector_dimension,
            'total_texts_encoded': self.stats['total_texts_encoded'],
            'total_encoding_time': round(self.stats['total_encoding_time'], 2),
            'average_encoding_time': round(self.stats['average_encoding_time'], 3),
            'cache_size': len(self.vector_cache),
            'cache_hit_rate': round(cache_hit_rate, 3),
            'cache_hits': self.stats['cache_hits'],
            'cache_misses': self.stats['cache_misses']
        }

    def save_vectors_to_file(self, texts: List[str], vectors: List[np.ndarray],
                            filepath: str, metadata: Optional[Dict] = None) -> None:
        """
        Persist text/vector pairs as a UTF-8 JSON file.

        Entries whose vector is None (failed encodings) are skipped, and
        ``total_count`` records the number of entries actually written
        (previously it recorded ``len(texts)``, which could disagree
        with the stored data).

        Args:
            texts: Texts corresponding to ``vectors``.
            vectors: Vectors to save; None entries are skipped.
            filepath: Destination path.
            metadata: Optional metadata dict stored alongside the data.
        """
        try:
            entries = [
                {'text': text, 'vector': vector.tolist()}
                for text, vector in zip(texts, vectors)
                if vector is not None
            ]

            data = {
                'metadata': metadata or {},
                'vector_dimension': self.vector_dimension,
                'total_count': len(entries),  # count of saved entries, not inputs
                'created_time': time.time(),
                'data': entries
            }

            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)

            logger.info(f"向量已保存到: {filepath}")

        except Exception as e:
            logger.error(f"保存向量失败: {str(e)}")

    def load_vectors_from_file(self, filepath: str) -> Tuple[List[str], List[np.ndarray]]:
        """
        Load text/vector pairs previously written by save_vectors_to_file.

        Args:
            filepath: Source JSON file path.

        Returns:
            Tuple[List[str], List[np.ndarray]]: Parallel lists of texts
            and float32 vectors; both empty on any failure.
        """
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                data = json.load(f)

            texts = []
            vectors = []

            for item in data['data']:
                texts.append(item['text'])
                vectors.append(np.array(item['vector'], dtype=np.float32))

            logger.info(f"从 {filepath} 加载了 {len(texts)} 个向量")
            return texts, vectors

        except Exception as e:
            logger.error(f"加载向量失败: {str(e)}")
            return [], []


def demo_qwen3_vectorizer():
    """End-to-end demo of the Qwen3 local vectorizer.

    Walks through single-text encoding, batch encoding, pairwise
    similarity, semantic search, statistics reporting, and a save /
    reload round trip against a temporary JSON file.
    """
    banner = "=" * 60
    print(banner)
    print("Qwen3-Embedding-8B-GGUF 本地向量化演示")
    print(banner)

    # Build the vectorizer; __init__ attempts the model load itself.
    vectorizer = Qwen3LocalVectorizer()
    if not vectorizer.is_loaded:
        print("❌ 模型加载失败，请检查模型路径和依赖")
        return

    print("✅ 模型加载成功！")
    print()

    # Demo corpus: five AI-related sentences followed by five unrelated ones.
    test_texts = [
        "人工智能正在改变我们的生活方式",
        "机器学习是人工智能的核心技术之一",
        "深度学习在图像识别领域表现出色",
        "自然语言处理帮助计算机理解人类语言",
        "大数据为AI模型提供了丰富的训练素材",
        "今天天气很好，适合外出散步",
        "我喜欢在周末看电影和读书",
        "这道菜的味道非常不错",
        "股票市场今天表现平稳",
        "新的智能手机功能很强大"
    ]

    print("🔍 1. 单文本向量化测试")
    sample = test_texts[0]
    sample_vec = vectorizer.encode_text(sample)
    if sample_vec is not None:
        print(f"文本: {sample}")
        print(f"向量维度: {len(sample_vec)}")
        print(f"向量前10维: {sample_vec[:10]}")
        print(f"向量L2范数: {np.linalg.norm(sample_vec):.4f}")
    print()

    print("📦 2. 批量向量化测试")
    subset = test_texts[:5]
    vectors = vectorizer.encode_batch(subset, show_progress=True)
    ok_count = sum(1 for v in vectors if v is not None)
    print(f"成功编码: {ok_count}/{len(subset)} 个文本")
    print()

    print("🔗 3. 文本相似度测试")
    # Index pairs: AI vs AI, AI vs weather, two everyday-life sentences.
    pair_indices = [(0, 1), (0, 5), (5, 6)]
    for a, b in pair_indices:
        first, second = test_texts[a], test_texts[b]
        score = vectorizer.compute_similarity(first, second)
        print(f"相似度: {score:.3f}")
        print(f"  文本1: {first}")
        print(f"  文本2: {second}")
        print()

    print("🔍 4. 语义搜索测试")
    query = "AI技术的应用"
    hits = vectorizer.find_most_similar(query, test_texts, top_k=3)

    print(f"查询: {query}")
    print("最相似的文本:")
    for rank, hit in enumerate(hits, 1):
        print(f"  {rank}. 相似度: {hit['similarity']:.3f}")
        print(f"     文本: {hit['text']}")
    print()

    print("📊 5. 性能统计")
    for key, value in vectorizer.get_stats().items():
        print(f"  {key}: {value}")
    print()

    print("💾 6. 向量保存测试")
    save_path = "qwen3_vectors_demo.json"
    vectorizer.save_vectors_to_file(
        test_texts[:3],
        vectors[:3],
        save_path,
        metadata={"demo": "qwen3_vectorizer", "model": "Qwen3-Embedding-8B"}
    )

    # Reload to verify the round trip, then remove the temp file.
    loaded_texts, loaded_vectors = vectorizer.load_vectors_from_file(save_path)
    print(f"加载验证: {len(loaded_texts)} 个文本，{len(loaded_vectors)} 个向量")

    if os.path.exists(save_path):
        os.remove(save_path)
        print(f"清理临时文件: {save_path}")

    print()
    print(banner)
    print("演示完成！")
    print(banner)


def benchmark_qwen3_performance():
    """Benchmark batch-encoding throughput and cache speedup.

    Encodes synthetic corpora of several sizes, reports timing and
    throughput per size, measures the cache speedup for a repeated
    text, and prints the final statistics.
    """
    banner = "=" * 60
    print(banner)
    print("Qwen3-Embedding-8B 性能基准测试")
    print(banner)

    vectorizer = Qwen3LocalVectorizer()
    if not vectorizer.is_loaded:
        print("❌ 模型加载失败")
        return

    # Throughput at increasing corpus sizes.
    for size in (10, 50, 100):
        print(f"\n📊 测试规模: {size} 个文本")

        corpus = [f"这是第{i}个测试文本，用于评估Qwen3向量化模型的性能表现。" for i in range(size)]

        t0 = time.time()
        encoded = vectorizer.encode_batch(corpus, show_progress=False)
        elapsed = time.time() - t0

        ok = [v for v in encoded if v is not None]
        per_item = elapsed / len(ok) if ok else 0

        print(f"  ✅ 成功编码: {len(ok)}/{size}")
        print(f"  ⏱️  总耗时: {elapsed:.2f} 秒")
        print(f"  📈 平均每个: {per_item:.3f} 秒")
        print(f"  🚀 处理速度: {len(ok)/elapsed:.1f} 个/秒")

    print("\n🗄️ 缓存性能测试")
    probe = "测试缓存性能的文本内容"

    # Cold call: must run the model.
    t0 = time.time()
    vectorizer.encode_text(probe, use_cache=True)
    cold = time.time() - t0

    # Warm call: served from the cache.
    t0 = time.time()
    vectorizer.encode_text(probe, use_cache=True)
    warm = time.time() - t0

    ratio = cold / warm if warm > 0 else float('inf')

    print(f"  首次编码: {cold:.4f} 秒")
    print(f"  缓存编码: {warm:.4f} 秒")
    print(f"  加速比: {ratio:.1f}x")

    # Final counters accumulated across the whole run.
    final = vectorizer.get_stats()
    print(f"\n📈 最终统计:")
    print(f"  总编码文本数: {final['total_texts_encoded']}")
    print(f"  缓存命中率: {final['cache_hit_rate']:.1%}")
    print(f"  平均编码时间: {final['average_encoding_time']:.3f} 秒")


if __name__ == "__main__":
    # Run the feature demo first, then the performance benchmark,
    # separated by a blank line and a banner.
    demo_qwen3_vectorizer()

    print()
    print("=" * 60)
    benchmark_qwen3_performance()
