"""
Core OSS client functionality backed by the GPT-OSS model served via Ollama.
"""

import os
import logging
import requests
import time
from dataclasses import dataclass
from typing import Optional, Dict, Any, List
from pathlib import Path

from .history import QAHistoryManager

# Auto-load .env file if present
def _auto_load_env():
    """Best-effort loading of a .env file via the package helper.

    Returns:
        bool: whatever ``load_env_file()`` reports when the helper is
        importable, otherwise ``False``.
    """
    # The helper call stays inside the try block so that a missing utils
    # module -- or a missing optional dependency it pulls in -- degrades
    # gracefully instead of breaking module import.
    try:
        from .utils import load_env_file as _load
        return _load()
    except ImportError:
        return False


# Auto-load .env on import unless explicitly disabled.
# NOTE(review): the toggle is named QWEN_AUTO_LOAD_ENV even though this is the
# OSS client -- presumably inherited from a sibling Qwen client; confirm with
# callers before renaming. Any value other than "true" (case-insensitive)
# disables the auto-load.
if os.getenv("QWEN_AUTO_LOAD_ENV", "true").lower() == "true":
    _auto_load_env()


@dataclass
class OSSConfig:
    """Configuration for OSS (GPT-OSS via Ollama) client."""

    api_key: str = ""
    base_url: str = "http://60.245.128.27:11434"
    model_name: str = "gpt-oss:120b"
    max_tokens: int = 4096
    temperature: float = 0.2
    top_p: float = 0.9
    timeout: int = 300
    skip_validation: bool = True

    @classmethod
    def from_env(cls, require_api_key: Optional[bool] = None) -> "OSSConfig":
        """Build a config from ``OSS_*`` / ``OLLAMA_*`` environment variables.

        Args:
            require_api_key: When ``None``, key validation is governed by the
                ``OSS_SKIP_API_VALIDATION`` variable (defaults to skipping).
                Otherwise ``True`` forces the check and ``False`` disables it.

        Raises:
            ValueError: if validation is active and ``OSS_API_KEY`` is unset.
        """
        skip_validation = (
            os.getenv("OSS_SKIP_API_VALIDATION", "true").lower() == "true"
            if require_api_key is None
            else not require_api_key
        )

        api_key = os.getenv("OSS_API_KEY", "")
        if not (skip_validation or api_key):
            raise ValueError("OSS_API_KEY environment variable is required or set OSS_SKIP_API_VALIDATION=true")

        # OSS_MODEL always wins; an OLLAMA_MODEL_ID is honored only when it
        # actually names a gpt-oss model, otherwise fall back to the default.
        model_name = os.getenv("OSS_MODEL")
        if not model_name:
            ollama_candidate = os.getenv("OLLAMA_MODEL_ID")
            if ollama_candidate and "gpt-oss" in ollama_candidate.lower():
                model_name = ollama_candidate
            else:
                model_name = cls.model_name

        return cls(
            api_key=api_key,
            base_url=os.getenv("OSS_BASE_URL") or os.getenv("OLLAMA_BASE_URL") or cls.base_url,
            model_name=model_name,
            max_tokens=int(os.getenv("OSS_MAX_TOKENS", cls.max_tokens)),
            temperature=float(os.getenv("OSS_TEMPERATURE", cls.temperature)),
            top_p=float(os.getenv("OSS_TOP_P", cls.top_p)),
            timeout=int(os.getenv("OSS_TIMEOUT", cls.timeout)),
            skip_validation=skip_validation,
        )

    @classmethod
    def auto_config(cls) -> "OSSConfig":
        """Load .env (best-effort) and then build the config from the environment."""
        _auto_load_env()
        return cls.from_env()

class OSSClient:
    """Standalone client for GPT-OSS models served through Ollama.

    Speaks the OpenAI-compatible ``/v1/chat/completions`` protocol against the
    configured base URL and, when enabled, records every exchange through
    QAHistoryManager.
    """

    def __init__(
        self,
        config: OSSConfig,
        logger: Optional[logging.Logger] = None,
        enable_history: bool = True,
    ):
        """Initialize the HTTP session, optional history manager, and logging.

        Args:
            config: Endpoint, model, and sampling configuration.
            logger: Logger to use; defaults to this module's logger.
            enable_history: When True, persist Q&A pairs via QAHistoryManager.
        """
        self.config = config
        self.logger = logger or logging.getLogger(__name__)
        self.session = requests.Session()

        self.history_manager = QAHistoryManager() if enable_history else None

        # Local Ollama deployments typically run without auth, so the bearer
        # header is only attached when an API key was actually configured.
        if config.api_key:
            self.session.headers.update({"Authorization": f"Bearer {config.api_key}"})

        self.session.headers.setdefault("Content-Type", "application/json")

        validation_status = "skipped" if config.skip_validation else "enabled"
        history_status = "enabled" if enable_history else "disabled"
        self.logger.info(
            f"OSSClient initialized with model: {config.model_name} at {config.base_url} "
            f"(validation: {validation_status}, history: {history_status})"
        )

    @classmethod
    def auto_create(cls, logger: Optional[logging.Logger] = None) -> "OSSClient":
        """Auto-create client with .env configuration."""
        config = OSSConfig.auto_config()
        return cls(config, logger)

    def _get_chat_endpoint(self) -> str:
        """Construct the chat completions endpoint for the configured base URL.

        Accepts base URLs at several levels of completeness (raw host,
        ``.../v1``, ``.../v1/chat``, or the full endpoint) and normalizes all
        of them to a single ``.../chat/completions`` URL.
        """
        base = self.config.base_url.rstrip("/")

        if base.endswith("/chat/completions"):
            return base

        if base.endswith("/v1"):
            return f"{base}/chat/completions"

        if base.endswith("/v1/chat"):
            return f"{base}/completions"

        # Default: assume raw host or host + optional path, append /v1/chat/completions
        return f"{base}/v1/chat/completions"

    def ask(self, question: str, system_prompt: Optional[str] = None, **kwargs) -> str:
        """
        Ask the GPT-OSS model a question.

        Args:
            question: User question or prompt.
            system_prompt: Optional system prompt to set assistant behavior.
            **kwargs: Overrides for max_tokens, temperature, top_p, timeout.
                Remaining keys are forwarded to the history manager as
                metadata (assumes QAHistoryManager.add_entry tolerates
                arbitrary keyword metadata -- TODO confirm).

        Returns:
            The assistant's reply text, or "" when the response has no choices.

        Raises:
            requests.exceptions.RequestException: On transport/HTTP errors.
            Exception: Re-raised after logging for any other failure
                (e.g. unexpected response shape).
        """
        start_time = time.time()

        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": question})

        payload: Dict[str, Any] = {
            "model": self.config.model_name,
            "messages": messages,
            "max_tokens": kwargs.get("max_tokens", self.config.max_tokens),
            "temperature": kwargs.get("temperature", self.config.temperature),
            "top_p": kwargs.get("top_p", self.config.top_p),
        }

        timeout = kwargs.get("timeout", self.config.timeout)

        # Perform request even if no API key; local Ollama typically runs without auth.
        try:
            endpoint = self._get_chat_endpoint()
            self.logger.debug(f"Calling OSS endpoint: {endpoint}")

            response = self.session.post(endpoint, json=payload, timeout=timeout)
            response.raise_for_status()

            result = response.json()

            if "choices" not in result or not result["choices"]:
                self.logger.warning("No choices returned from OSS response")
                return ""

            answer = result["choices"][0]["message"]["content"].strip()

            if self.history_manager:
                response_time = time.time() - start_time
                tokens_used = result.get("usage", {}).get("total_tokens")
                self.history_manager.add_entry(
                    question=question,
                    answer=answer,
                    model="oss",
                    system_prompt=system_prompt,
                    response_time=response_time,
                    tokens_used=tokens_used,
                    **kwargs,
                )

            return answer

        except requests.exceptions.RequestException as e:
            self.logger.error(f"OSS request failed: {e}")
            raise
        except Exception as e:
            self.logger.error(f"Failed to call OSS model: {e}")
            raise

    def ask_with_context(
        self, question: str, context: Dict[str, str], system_prompt: Optional[str] = None, **kwargs
    ) -> str:
        """Ask the OSS model with additional contextual snippets.

        The context dict (title -> content) is rendered into a Chinese
        background section appended below the question.
        """
        context_section = self._format_context(context)
        full_question = f"{question}\n\n{context_section}" if context_section else question
        return self.ask(full_question, system_prompt, **kwargs)

    def ask_chinese_technical(
        self,
        question: str,
        context: Optional[Dict[str, str]] = None,
        domain: str = "技术",
        **kwargs,
    ) -> str:
        """
        Ask the OSS model for a Chinese technical response with domain guidance.

        Args:
            question: Technical question to answer.
            context: Optional background snippets keyed by title.
            domain: Expertise domain injected into the system prompt.
            **kwargs: Sampling overrides plus history metadata.
        """
        system_prompt = (
            f"你是{domain}领域的专家。请用中文回答技术问题，提供：\n"
            "1. 详细的技术解释\n"
            "2. 具体的实现方法\n"
            "3. 相关的代码示例（如适用）\n"
            "4. 实际的配置参数和操作步骤\n"
            "5. 潜在的风险点和解决方案\n\n"
            "请提供专业、准确、实用的技术指导。"
        )

        # History metadata travels through **kwargs. BUGFIX: "context" must
        # not be stuffed into kwargs on the ask_with_context path -- that
        # method already receives `context` positionally, and duplicating it
        # raised "TypeError: got multiple values for argument 'context'".
        kwargs["chinese_mode"] = True
        kwargs["domain"] = domain

        if context:
            return self.ask_with_context(question, context, system_prompt, **kwargs)

        # No context dict: safe (and useful for history) to record it as None.
        kwargs["context"] = context
        return self.ask(question, system_prompt, **kwargs)

    def batch_ask(
        self, questions: List[str], system_prompt: Optional[str] = None, **kwargs
    ) -> Dict[str, str]:
        """Ask multiple questions sequentially.

        Returns:
            Mapping of question -> answer; a failed question maps to an
            "[Error: ...]" string instead of aborting the batch.
        """
        results: Dict[str, str] = {}

        for i, question in enumerate(questions, start=1):
            self.logger.info(f"Processing question {i}/{len(questions)}: {question[:50]}...")
            try:
                results[question] = self.ask(question, system_prompt, **kwargs)
            except Exception as e:
                # Best-effort batch: record the failure and keep going.
                self.logger.error(f"Failed to process question {i}: {e}")
                results[question] = f"[Error: {e}]"

        return results

    def _format_context(self, context: Dict[str, str]) -> str:
        """Format context snippets for inclusion in the prompt.

        Each entry becomes a numbered Chinese "background" section; content is
        truncated to 1000 characters with a "..." marker. Returns "" for an
        empty/None context so callers can skip the section entirely.
        """
        if not context:
            return ""

        context_lines = [
            "## 相关背景信息",
            "",
            "以下是相关的背景信息，可作为参考：",
            "",
        ]

        for idx, (title, content) in enumerate(context.items(), start=1):
            # Cap each snippet at 1000 chars to keep the prompt bounded.
            snippet = content[:1000] + ("..." if len(content) > 1000 else "")
            context_lines.extend(
                [
                    f"### 背景{idx}: {title}",
                    "",
                    snippet,
                    "",
                    "---",
                    "",
                ]
            )

        context_lines.extend(
            [
                "请基于上述背景信息和当前问题进行分析和回答。",
                "",
            ]
        )

        return "\n".join(context_lines)

    def validate_connection(self) -> bool:
        """Validate connection to the OSS model endpoint.

        Sends a tiny prompt with capped tokens/timeout; returns True only when
        a non-empty reply comes back. Never raises -- failures are logged.
        """
        try:
            test_response = self.ask(
                "Hello! Please reply with a short confirmation that the OSS endpoint is reachable.",
                max_tokens=min(256, self.config.max_tokens),
                timeout=min(self.config.timeout, 60),
            )
            return bool(test_response.strip())
        except Exception as e:
            self.logger.error(f"OSS connection validation failed: {e}")
            return False