'''
Author: Diana Tang
Date: 2025-04-04 22:39:41
LastEditors: Diana Tang
Description: Model-loading utilities: sentence-transformers embedding loader and Ollama service helpers
FilePath: /pub-scholar-assist/app/model_loader.py
'''
import os
import yaml
import torch
import codecs
import requests
from sentence_transformers import SentenceTransformer

class ModelLoader:
    """Load the local embedding model and query the status of an Ollama service.

    Configuration is read once from a YAML file at construction time; the
    Ollama base URL is taken from ``config['ollama']['base_url']`` with a
    localhost default.
    """

    # Bounded timeout (seconds) for HTTP calls to the Ollama API so an
    # unreachable service fails fast instead of hanging the process forever.
    REQUEST_TIMEOUT = 10

    def __init__(self, config_path="config/config.yaml"):
        """Read the YAML config (UTF-8) and enable Hugging Face offline mode.

        Args:
            config_path: Path to the YAML configuration file.
        """
        # Load configuration with an explicit UTF-8 decode.
        with codecs.open(config_path, 'r', encoding='utf-8') as f:
            self.config = yaml.safe_load(f)

        self.embedding_model = None

        # Force offline mode so transformers/huggingface_hub only use the
        # local cache and never hit the network when loading models.
        os.environ['TRANSFORMERS_OFFLINE'] = '1'
        os.environ['HF_HUB_OFFLINE'] = '1'

    def _ollama_base_url(self):
        """Return the configured Ollama base URL, defaulting to localhost."""
        return self.config.get('ollama', {}).get('base_url', 'http://localhost:11434')

    def load_embedding_model(self):
        """Load and cache a lightweight sentence-transformers embedding model.

        Returns:
            The loaded ``SentenceTransformer`` instance (also stored on
            ``self.embedding_model``).

        Raises:
            Exception: Re-raises whatever ``SentenceTransformer`` raised,
            after printing troubleshooting hints.
        """
        print("Loading embedding model...")

        try:
            # A small general-purpose model, loaded on CPU.
            model_name = "sentence-transformers/all-MiniLM-L6-v2"
            self.embedding_model = SentenceTransformer(model_name, device='cpu')
            print("Embedding model loaded successfully!")
            return self.embedding_model
        except Exception as e:
            print(f"Error loading model: {str(e)}")
            print("\n请确保：")
            print("1. 已安装 sentence-transformers: pip install sentence-transformers")
            print("2. 有网络连接可以下载模型")
            raise

    def check_ollama_status(self):
        """Check whether the Ollama service is reachable.

        Returns:
            True when ``GET /api/version`` answers with HTTP 200,
            False on any other status code or on a connection error.
        """
        try:
            base_url = self._ollama_base_url()
            response = requests.get(
                f"{base_url}/api/version", timeout=self.REQUEST_TIMEOUT
            )
            if response.status_code == 200:
                version_info = response.json()
                print(f"Ollama service is running. Version: {version_info.get('version', 'unknown')}")
                return True
            else:
                print(f"Ollama service responded with status code: {response.status_code}")
                return False
        except requests.exceptions.RequestException as e:
            print(f"Failed to connect to Ollama service: {e}")
            return False

    def list_ollama_models(self):
        """List the models available on the Ollama server.

        Returns:
            The list of model dicts from ``GET /api/tags`` (possibly empty),
            or ``[]`` on any HTTP or connection failure.
        """
        try:
            base_url = self._ollama_base_url()
            response = requests.get(
                f"{base_url}/api/tags", timeout=self.REQUEST_TIMEOUT
            )

            if response.status_code == 200:
                models = response.json().get('models', [])
                print("Available Ollama models:")
                for model in models:
                    print(f"- {model.get('name')}")

                return models
            else:
                print(f"Failed to list models. Status code: {response.status_code}")
                return []
        except requests.exceptions.RequestException as e:
            print(f"Failed to connect to Ollama service: {e}")
            return []

class OllamaEmbeddingModel:
    """Text embeddings via the Ollama HTTP API.

    Mirrors the ``encode`` interface of sentence-transformers models:
    one embedding vector (list of floats) per input text.
    """

    # Timeout (seconds) per embedding request; large inputs / cold models
    # can be slow, but we never want to block indefinitely.
    REQUEST_TIMEOUT = 60

    def __init__(self, base_url, model_name):
        """
        Args:
            base_url: Base URL of the Ollama server,
                e.g. ``http://localhost:11434``.
            model_name: Name of the embedding-capable model to query.
        """
        self.base_url = base_url
        self.model_name = model_name

    def encode(self, texts):
        """Return one embedding vector per input text.

        Args:
            texts: A single string or a list of strings.

        Returns:
            A list of embedding vectors (lists of floats), one per input.

        Raises:
            requests.exceptions.RequestException: Re-raised after printing
            troubleshooting hints when the API call fails.
        """
        if isinstance(texts, str):
            texts = [texts]

        embeddings = []
        for text in texts:
            try:
                # Use the dedicated embeddings endpoint. /api/generate performs
                # text generation and returns no "embedding" field, so parsing
                # its textual response as floats would fail.
                response = requests.post(
                    f"{self.base_url}/api/embeddings",
                    json={
                        "model": self.model_name,
                        "prompt": text,
                    },
                    timeout=self.REQUEST_TIMEOUT,
                )
                response.raise_for_status()
                result = response.json()

                # The embeddings endpoint responds with {"embedding": [...]};
                # fall back to an empty vector if the field is absent.
                embeddings.append(result.get('embedding', []))
            except Exception as e:
                print(f"Error getting embedding: {str(e)}")
                print("请确保：")
                print("1. Ollama 服务正在运行")
                print("2. 模型已经正确加载")
                print("3. 可以访问 Ollama API")
                raise

        return embeddings