import logging
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import wraps
from queue import Queue, Empty
from threading import Semaphore
from typing import Any, Dict, Optional

import requests
from flask import current_app

class OllamaClient:
    """Ollama API client with bounded concurrency and a retry mechanism.

    A shared semaphore caps the number of in-flight HTTP requests, and
    failed requests are retried with exponential backoff.
    """

    def __init__(self, base_url: str, max_concurrent: int = 3, max_retries: int = 3,
                 retry_delay: float = 1.0, timeout: Optional[float] = None):
        """
        Args:
            base_url: Root URL of the Ollama server, e.g. "http://localhost:11434".
            max_concurrent: Maximum number of simultaneous requests.
            max_retries: Total number of attempts before giving up.
            retry_delay: Base delay in seconds for exponential backoff.
            timeout: Per-request timeout in seconds. None (the default)
                preserves the previous behavior of waiting indefinitely.
        """
        self.base_url = base_url
        self.max_concurrent = max_concurrent
        self.max_retries = max_retries
        self.retry_delay = retry_delay
        self.timeout = timeout
        self.semaphore = Semaphore(max_concurrent)
        self.logger = logging.getLogger(__name__)

    def _make_request(self, endpoint: str, data: Dict[str, Any], stream: bool = False) -> Any:
        """POST ``data`` to the given Ollama API endpoint, retrying on failure.

        Args:
            endpoint: API path segment, e.g. "chat" or "embeddings".
            data: JSON-serializable request payload.
            stream: Whether to request a streaming response.

        Returns:
            The successful ``requests.Response``.

        Raises:
            Exception: after ``max_retries`` consecutive failures; the last
                underlying ``requests`` error is chained as the cause.
        """
        url = f"{self.base_url}/api/{endpoint}"
        last_error: Optional[Exception] = None

        for attempt in range(1, self.max_retries + 1):
            try:
                with self.semaphore:  # cap concurrent requests
                    response = requests.post(url, json=data, stream=stream,
                                             timeout=self.timeout)
                    response.raise_for_status()
                    return response

            except requests.exceptions.RequestException as e:
                last_error = e
                # Lazy %-formatting: only rendered when the warning is emitted.
                self.logger.warning("Request failed (attempt %d/%d): %s",
                                    attempt, self.max_retries, e)
                if attempt < self.max_retries:
                    # True exponential backoff: delay, 2*delay, 4*delay, ...
                    # (was linear despite the original "exponential" comment).
                    time.sleep(self.retry_delay * (2 ** (attempt - 1)))

        # Reached after exhausting all attempts (or immediately when
        # max_retries <= 0, which previously returned None silently).
        raise Exception(
            f"Failed after {self.max_retries} attempts: {last_error}"
        ) from last_error

    def chat(self, messages: list, model: str = 'openchat', stream: bool = True) -> Any:
        """Call the chat endpoint; returns the raw ``requests`` response."""
        data = {
            "model": model,
            "messages": messages,
            "stream": stream
        }
        return self._make_request("chat", data, stream=stream)

    def embeddings(self, text: str, model: str = 'openchat') -> list:
        """Return the embedding vector for a single piece of text."""
        data = {
            "model": model,
            "prompt": text
        }
        response = self._make_request("embeddings", data)
        return response.json()["embedding"]

    def batch_embeddings(self, texts: list, model: str = 'openchat', batch_size: int = 10) -> list:
        """Embed ``texts`` concurrently, preserving input order.

        Bug fix: the previous implementation appended results in completion
        order (``as_completed``), so embeddings could come back shuffled
        relative to ``texts``. Results are now written back by input index.

        Args:
            texts: List of strings to embed.
            model: Model name passed to the embeddings endpoint.
            batch_size: Number of texts submitted to the pool per batch.

        Returns:
            Embedding vectors in the same order as ``texts``.
        """
        embeddings: list = []

        # Process texts in fixed-size batches.
        for start in range(0, len(texts), batch_size):
            batch = texts[start:start + batch_size]
            results: list = [None] * len(batch)

            with ThreadPoolExecutor(max_workers=self.max_concurrent) as executor:
                future_to_index = {
                    executor.submit(self.embeddings, text, model): idx
                    for idx, text in enumerate(batch)
                }

                # Collect as they finish, but slot each result into its
                # original position so ordering is stable.
                for future in as_completed(future_to_index):
                    try:
                        results[future_to_index[future]] = future.result()
                    except Exception as e:
                        self.logger.error(f"Error generating embedding: {str(e)}")
                        raise

            embeddings.extend(results)

        return embeddings

# Module-level singleton, created lazily on first use.
ollama_client = None

def get_ollama_client():
    """Return the shared OllamaClient, creating it from Flask config on first call."""
    global ollama_client
    if ollama_client is not None:
        return ollama_client
    cfg = current_app.config
    ollama_client = OllamaClient(
        base_url=cfg['OLLAMA_BASE_URL'],
        max_concurrent=cfg.get('OLLAMA_MAX_CONCURRENT', 3),
        max_retries=cfg.get('OLLAMA_MAX_RETRIES', 3),
        retry_delay=cfg.get('OLLAMA_RETRY_DELAY', 1.0)
    )
    return ollama_client