# -*- coding: utf-8 -*-
"""
Ollama客户端工具类
用于与本地qwq:latest模型进行交互
"""

import requests
import json
import time
from typing import Dict, List, Optional, Any
from loguru import logger
from config import OLLAMA_CONFIG


class OllamaClient:
    """Client for a local Ollama server, exposing embedding and rerank helpers.

    All network failures degrade gracefully instead of raising: embedding
    calls return an empty list and rerank calls fall back to the original
    document order, so callers never have to guard against exceptions.
    """

    def __init__(self, base_url: str = None, embedding_model: str = None,
                 rerank_model: str = None, timeout: int = None):
        """Initialize the client and probe the Ollama server.

        Args:
            base_url: Ollama server URL; defaults to OLLAMA_CONFIG["base_url"].
            embedding_model: Embedding model name; defaults to the config value.
            rerank_model: Rerank model name; defaults to the config value.
            timeout: Request timeout in seconds; defaults to the config value.
        """
        self.base_url = base_url or OLLAMA_CONFIG["base_url"]
        self.embedding_model = embedding_model or OLLAMA_CONFIG["embedding_model"]
        self.rerank_model = rerank_model or OLLAMA_CONFIG["rerank_model"]
        # Fix: the previous signature default of 30 made OLLAMA_CONFIG["timeout"]
        # unreachable dead code; defaulting to None restores the intended
        # "explicit argument, else config" precedence used by the other params.
        self.timeout = timeout or OLLAMA_CONFIG["timeout"]
        self.session = requests.Session()

        # Best-effort connectivity probe; failures are logged, never raised.
        self._verify_connection()

    def _verify_connection(self) -> bool:
        """Check that the server responds and both configured models exist.

        Returns:
            bool: True when /api/tags answers 200 and both the embedding and
            rerank models are listed; False otherwise.
        """
        try:
            response = self.session.get(f"{self.base_url}/api/tags", timeout=5)
            if response.status_code != 200:
                logger.error(f"❌ 无法连接到Ollama服务: {response.status_code}")
                return False

            models = response.json().get("models", [])
            model_names = [model["name"] for model in models]
            # Ollama reports tagged names such as "qwq:latest"; accept a
            # configured name in either its tagged or its bare base form.
            available = set(model_names) | {name.split(":", 1)[0] for name in model_names}
            embedding_available = self.embedding_model in available
            rerank_available = self.rerank_model in available

            if not embedding_available:
                logger.warning(f"⚠️ Embedding模型 {self.embedding_model} 不可用")
            if not rerank_available:
                logger.warning(f"⚠️ Rerank模型 {self.rerank_model} 不可用")

            if embedding_available and rerank_available:
                logger.info(f"✅ 成功连接到Ollama服务，embedding和rerank模型可用")
                return True
            logger.warning(f"⚠️ 部分模型不可用，可用模型: {model_names}")
            return False
        except Exception as e:
            logger.error(f"❌ 连接Ollama服务失败: {e}")
            return False

    def get_embedding(self, text: str) -> List[float]:
        """Return the embedding vector for *text*.

        Args:
            text: Input text.

        Returns:
            List[float]: The embedding, or an empty list on any failure.
        """
        try:
            data = {
                "model": self.embedding_model,
                "prompt": text,
            }
            response = self.session.post(
                f"{self.base_url}/api/embeddings",
                json=data,
                timeout=self.timeout,
            )
            if response.status_code == 200:
                return response.json().get("embedding", [])
            logger.error(f"获取embedding失败: {response.status_code} - {response.text}")
            return []
        except Exception as e:
            logger.error(f"获取embedding异常: {e}")
            return []

    def get_batch_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Return one embedding per input text.

        Args:
            texts: Texts to embed; one request is issued per text.

        Returns:
            List[List[float]]: Embeddings in the same order as *texts*
            (failed items come back as empty lists).
        """
        return [self.get_embedding(text) for text in texts]

    def _fallback_ranking(self, documents: List[str], top_k: int) -> List[tuple]:
        """Synthetic ranking used when rerank is unavailable: original document
        order with linearly decaying scores (1.0, 0.9, 0.8, ...)."""
        return [(i, 1.0 - i * 0.1) for i in range(min(len(documents), top_k))]

    def rerank_documents(self, query: str, documents: List[str], top_k: int = 5) -> List[tuple]:
        """Rerank *documents* by relevance to *query*.

        Args:
            query: Query text.
            documents: Candidate documents.
            top_k: Maximum number of results to return.

        Returns:
            List[tuple]: (document index, relevance score) pairs. On any
            failure, documents are returned in their original order with
            synthetic decaying scores.
        """
        try:
            data = {
                "model": self.rerank_model,
                "query": query,
                "documents": documents,
                "top_k": top_k,
            }
            # NOTE(review): /api/rerank is not part of the stock Ollama REST
            # API — confirm the local server actually exposes this route;
            # otherwise every call takes the fallback path below.
            response = self.session.post(
                f"{self.base_url}/api/rerank",
                json=data,
                timeout=self.timeout,
            )
            if response.status_code == 200:
                result = response.json()
                return [(item["index"], item["relevance_score"]) for item in result.get("results", [])]
            logger.error(f"Rerank失败: {response.status_code} - {response.text}")
            return self._fallback_ranking(documents, top_k)
        except Exception as e:
            logger.error(f"Rerank异常: {e}")
            return self._fallback_ranking(documents, top_k)

    def calculate_similarity(self, text1: str, text2: str) -> float:
        """Return the cosine similarity between two texts' embeddings.

        Args:
            text1: First text.
            text2: Second text.

        Returns:
            float: Similarity clamped to [0, 1]; 0.0 when either embedding
            is unavailable or an error occurs.
        """
        try:
            embedding1 = self.get_embedding(text1)
            embedding2 = self.get_embedding(text2)
            if not embedding1 or not embedding2:
                return 0.0

            # Imported lazily so the embedding/rerank paths don't need numpy.
            import numpy as np

            vec1 = np.array(embedding1)
            vec2 = np.array(embedding2)
            norm1 = np.linalg.norm(vec1)
            norm2 = np.linalg.norm(vec2)
            if norm1 == 0 or norm2 == 0:
                return 0.0

            similarity = np.dot(vec1, vec2) / (norm1 * norm2)
            # Cosine similarity may fall slightly outside [0, 1] (negative,
            # or >1 from float error); clamp to the documented range.
            return max(0.0, min(1.0, similarity))
        except Exception as e:
            logger.error(f"计算相似度异常: {e}")
            return 0.0

    def extract_keywords_simple(self, text: str, max_keywords: int = 10) -> List[str]:
        """Extract keywords from *text* by frequency (no model involved).

        Args:
            text: Input text.
            max_keywords: Maximum number of keywords to return.

        Returns:
            List[str]: Most frequent CJK "words" (runs of Chinese characters,
            length >= 2), most common first.
        """
        import re
        from collections import Counter

        # Runs of CJK unified ideographs act as crude word boundaries;
        # single characters are dropped as too generic to be keywords.
        words = re.findall(r'[\u4e00-\u9fa5]+', text)
        words = [word for word in words if len(word) >= 2]
        return [word for word, _ in Counter(words).most_common(max_keywords)]


# Global, shared client instance.
# NOTE(review): constructing it here runs _verify_connection(), so merely
# importing this module performs an HTTP request to the Ollama server
# (import-time side effect) — confirm this is intended for all importers.
ollama_client = OllamaClient()


if __name__ == "__main__":
    # Smoke-test the client against a locally running Ollama server.
    demo = OllamaClient()

    # Embedding: report the vector dimensionality (0 on failure).
    vec = demo.get_embedding("大伙房水库是辽宁省重要的水利工程")
    print(f"Embedding维度: {len(vec) if vec else 0}")

    # Frequency-based keyword extraction (no network involved).
    kws = demo.extract_keywords_simple("大伙房水库是辽宁省重要的水利工程，具有防洪、供水等多种功能")
    print(f"关键词: {kws}")

    # Cosine similarity between two embeddings.
    score = demo.calculate_similarity("水库的防洪功能", "水库用于防洪")
    print(f"相似度: {score}")

    # Rerank a small candidate set against a query.
    docs = ["水库防洪功能介绍", "水库供水系统", "水库生态保护"]
    ranked = demo.rerank_documents("防洪", docs, top_k=2)
    print(f"Rerank结果: {ranked}")