"""Ollama client implementation."""

import time
from typing import Any, Dict, Optional

import aiohttp

from .base import BaseLLMClient
from ..config import RequestResult, OllamaConfig


class OllamaClient(BaseLLMClient):
    """Client for the Ollama chat backend (non-streaming ``/api/chat``)."""

    def __init__(self, config: Optional[OllamaConfig] = None):
        """
        Initialize Ollama client.

        Args:
            config: OllamaConfig instance. Falls back to
                ``DEFAULT_OLLAMA_CONFIG`` when None.
        """
        if config is None:
            # Imported lazily so the default config is only pulled in when
            # needed (and to avoid import-order issues at module load time).
            from ..config import DEFAULT_OLLAMA_CONFIG
            config = DEFAULT_OLLAMA_CONFIG

        super().__init__(config.base_url, config.model, config.timeout)

    async def generate(
        self,
        session: aiohttp.ClientSession,
        question: str,
        max_tokens: int = 512,
        temperature: float = 0.7
    ) -> RequestResult:
        """
        Generate a response using the Ollama backend.

        Never raises: any HTTP error, timeout, or malformed response is
        reported as a failed ``RequestResult`` so the caller can aggregate
        metrics without wrapping each call in try/except.

        Args:
            session: aiohttp session used for the POST request.
            question: Input question (sent as the user message).
            max_tokens: Maximum tokens to generate (``num_predict``).
            temperature: Sampling temperature.

        Returns:
            RequestResult with response text and latency/token metrics.
        """
        # Wall-clock timestamp for the result record; monotonic clock for
        # latency so the measurement is immune to system clock adjustments.
        timestamp = time.time()
        start = time.monotonic()

        def _failure(error: str) -> RequestResult:
            # Single place that builds a failed result (HTTP error or
            # exception path) so the two branches cannot drift apart.
            return RequestResult(
                backend="Ollama",
                question=question,
                latency=time.monotonic() - start,
                tokens=0,
                success=False,
                error=error,
                timestamp=timestamp
            )

        try:
            payload: Dict[str, Any] = {
                "model": self.model,
                "messages": [
                    {"role": "system", "content": "你是一个水文学专家，请详细回答用户的问题。"},
                    {"role": "user", "content": question}
                ],
                "stream": False,
                "options": {
                    "num_predict": max_tokens,
                    "temperature": temperature,
                }
            }

            async with session.post(
                f"{self.base_url}/api/chat",
                json=payload,
                timeout=aiohttp.ClientTimeout(total=self.timeout)
            ) as resp:
                if resp.status != 200:
                    error_text = await resp.text()
                    return _failure(f"HTTP {resp.status}: {error_text}")

                result = await resp.json()
                latency = time.monotonic() - start

                response_text = result["message"]["content"]
                # Ollama reports generated-token count as "eval_count";
                # default to 0 if the field is absent.
                tokens = result.get("eval_count", 0)

                return RequestResult(
                    backend="Ollama",
                    question=question,
                    latency=latency,
                    tokens=tokens,
                    success=True,
                    response=response_text,
                    timestamp=timestamp
                )

        except Exception as e:
            # Broad catch is deliberate at this boundary: timeouts,
            # connection errors, and JSON/KeyError failures must all be
            # recorded as failed results rather than propagate.
            return _failure(str(e))

