"""Simple OpenAI-compatible inference service using transformers.

Endpoints (subset):
POST /v1/chat/completions
POST /v1/completions (classic single prompt)
GET  /health

Environment variables:
  INFER_MODEL (default: "gpt2")  # replace with local path if needed
  MAX_NEW_TOKENS (default: 256)
  DEVICE (default: auto cuda if available else cpu)

Run:
  uvicorn service.openai_service:app --host 0.0.0.0 --port 8001 --reload

This is a minimal subset; streaming not yet implemented (set stream=false or omit).
"""
from __future__ import annotations
import os
import time
import uuid
from typing import List, Optional, Any, Dict
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
try:
    import jinja2  # optional
except ImportError:  # pragma: no cover
    jinja2 = None

# ------------------ Config ------------------
# All knobs come from environment variables so no config file is needed.
MODEL_NAME = os.environ.get("INFER_MODEL", "gpt2")  # HF hub id or local path
MAX_NEW_TOKENS = int(os.environ.get("MAX_NEW_TOKENS", 256))  # generation budget when request omits max_tokens
TEMPERATURE = float(os.environ.get("TEMPERATURE", 0.7))  # sampling default when request omits temperature
TOP_P = float(os.environ.get("TOP_P", 0.95))  # nucleus-sampling default when request omits top_p
DEVICE = os.environ.get("DEVICE")  # None -> auto (cuda if available, else cpu); see _load()

# ------------------ Lazy model / template ------------------
# Filled in lazily on first request; never reset for the process lifetime.
_tokenizer = None  # set by _load()
_model = None  # set by _load()
_chat_template_cached: str | None = None  # set by _load_chat_template(); "" means "no template"

def _load():
    """Lazily load and cache the tokenizer and model on first use.

    Weights are loaded in float16 when CUDA is available (halves memory).
    The model is moved to DEVICE when that env var is set; otherwise to
    "cuda" when available, else it stays on CPU.

    Returns:
        (tokenizer, model) tuple, cached in module globals after first call.

    Raises:
        RuntimeError: when the tokenizer or model fails to load; the original
            exception is chained so the root cause stays visible.
    """
    global _tokenizer, _model
    if _tokenizer is None or _model is None:
        try:
            _tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
            _model = AutoModelForCausalLM.from_pretrained(
                MODEL_NAME,
                torch_dtype=torch.float16 if torch.cuda.is_available() else None,
            )
            # Explicit DEVICE wins; otherwise prefer CUDA, else leave on CPU.
            target = DEVICE or ("cuda" if torch.cuda.is_available() else None)
            if target:
                _model.to(target)
        except Exception as e:
            raise RuntimeError(f"Failed to load model {MODEL_NAME}: {e}") from e
    return _tokenizer, _model

# ------------------ Schemas ------------------
class ChatMessage(BaseModel):
    """One chat turn: a role tag plus its text content."""
    role: str  # e.g. "system", "user", "assistant"; other roles accepted too
    content: str

class ChatCompletionRequest(BaseModel):
    """Body of POST /v1/chat/completions (OpenAI-compatible subset)."""
    model: Optional[str] = None  # must equal the single hosted model when given
    messages: List[ChatMessage]  # non-empty; enforced in the handler
    max_tokens: Optional[int] = None  # None -> MAX_NEW_TOKENS default
    temperature: Optional[float] = None  # None -> TEMPERATURE default
    top_p: Optional[float] = None  # None -> TOP_P default
    stream: Optional[bool] = False  # streaming not implemented (see module docstring)
    # Extra fields for advanced templates (e.g. qwen.tpl)
    tools: Optional[List[Dict[str, Any]]] = None  # passed through to Jinja templates
    add_generation_prompt: Optional[bool] = True  # passed through to Jinja templates

class ChatCompletionChoice(BaseModel):
    """One generated alternative; this service always returns exactly one."""
    index: int
    message: ChatMessage
    finish_reason: str | None = None  # always "stop" in the current handlers

class ChatCompletionUsage(BaseModel):
    """Token accounting, shared by chat and classic completion responses."""
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int  # prompt_tokens + completion_tokens

class ChatCompletionResponse(BaseModel):
    """OpenAI-style chat completion envelope."""
    id: str  # "chatcmpl-<uuid hex>"
    object: str = "chat.completion"
    created: int  # unix timestamp (seconds)
    model: str
    choices: List[ChatCompletionChoice]
    usage: ChatCompletionUsage

class CompletionRequest(BaseModel):
    """Body of POST /v1/completions (classic single-prompt API)."""
    model: Optional[str] = None  # must equal the single hosted model when given
    prompt: str
    max_tokens: Optional[int] = None  # None -> MAX_NEW_TOKENS default
    temperature: Optional[float] = None  # None -> TEMPERATURE default
    top_p: Optional[float] = None  # None -> TOP_P default

class CompletionChoice(BaseModel):
    """One generated text alternative for the classic completion endpoint."""
    text: str
    index: int
    finish_reason: str | None = None  # always "stop" in the current handlers

class CompletionResponse(BaseModel):
    """OpenAI-style classic completion envelope."""
    id: str  # "cmpl-<uuid hex>"
    object: str = "text_completion"
    created: int  # unix timestamp (seconds)
    model: str
    choices: List[CompletionChoice]
    usage: ChatCompletionUsage  # reuses the chat usage schema

# ------------------ Helpers ------------------

def _load_chat_template() -> str:
    """Load and cache the chat template file (once per process).

    Environment variable: CHAT_TEMPLATE_PATH
    If provided and the file exists, its content is used.
    Placeholders supported (python str.format):
      {conversation}  - concatenated conversation with role tags and trailing [ASSISTANT] marker
      {system}        - first system message content (empty if none)
      {last_user}     - last user message content (empty if none)
      {model}         - model name
    Unknown placeholders are left as-is.

    Returns "" when no template is configured or the file cannot be read;
    callers treat "" as "use the built-in plain formatting".
    """
    global _chat_template_cached
    if _chat_template_cached is not None:
        return _chat_template_cached
    # Default to "no template"; only a successful read overrides it.
    _chat_template_cached = ""
    path = os.environ.get("CHAT_TEMPLATE_PATH")
    if path and os.path.isfile(path):
        try:
            with open(path, "r", encoding="utf-8") as f:
                _chat_template_cached = f.read()
        except Exception:
            # Best effort: an unreadable template silently disables templating
            # rather than failing every request.
            pass
    return _chat_template_cached


class _SafeDict(dict):
    def __missing__(self, key):  # leave unknown placeholder untouched
        return "{" + key + "}"


def _is_jinja_template(tpl: str) -> bool:
    return any(p in tpl for p in ("{%-", "{%", "{{", "}}")) and "messages" in tpl


def _extract_conversation(messages: List[ChatMessage]) -> tuple:
    """Flatten messages into a role-tagged transcript.

    Returns (conversation, first_system_content, last_user_content); the
    transcript ends with a trailing "[ASSISTANT] " generation marker.
    """
    parts: List[str] = []
    system_msg = ""
    last_user = ""
    for m in messages:
        role = m.role.lower()
        if role == "system" and not system_msg:
            system_msg = m.content
        if role == "user":
            last_user = m.content
        if role == "system":
            parts.append(f"[SYSTEM] {m.content}\n")
        elif role == "user":
            parts.append(f"[USER] {m.content}\n")
        elif role == "assistant":
            parts.append(f"[ASSISTANT] {m.content}\n")
        else:
            parts.append(f"[{role.upper()}] {m.content}\n")
    parts.append("[ASSISTANT] ")
    return "".join(parts), system_msg, last_user


def _render_jinja(template: str, req: ChatCompletionRequest, fallback: str) -> str:
    """Render a Jinja template; fall back to *fallback* on any render error."""
    if jinja2 is None:
        raise RuntimeError("jinja2 not installed but Jinja template detected. Please pip install jinja2.")
    env = jinja2.Environment(autoescape=False, trim_blocks=True, lstrip_blocks=True)
    try:
        jtpl = env.from_string(template)
        # Convert messages to the richer structure expected by qwen-style
        # templates (reasoning_content / tool_calls placeholders).
        rich_messages = [
            {
                "role": m.role,
                "content": m.content,
                "reasoning_content": None,
                "tool_calls": [],
            }
            for m in req.messages
        ]
        return jtpl.render(
            messages=rich_messages,
            tools=req.tools or [],
            add_generation_prompt=req.add_generation_prompt,
        )
    except Exception:
        # Best effort: a broken template must not take down the request.
        return fallback


def _build_chat_prompt(req: ChatCompletionRequest) -> str:
    """Turn a chat request into a single prompt string.

    Resolution order:
      1. No template configured -> plain role-tagged transcript.
      2. Jinja-style template (see _is_jinja_template) -> rendered via jinja2,
         falling back to the plain transcript on render errors.
      3. Otherwise -> python str.format-style template with {conversation},
         {system}, {last_user} and {model} placeholders; a trailing
         "[ASSISTANT] " marker is appended when the rendered text lacks one.
    """
    conversation, system_msg, last_user = _extract_conversation(req.messages)
    template = _load_chat_template()
    if not template.strip():
        return conversation
    if _is_jinja_template(template):
        return _render_jinja(template, req, conversation)
    # str.format path: unknown placeholders survive via _SafeDict.
    ctx = _SafeDict(conversation=conversation, system=system_msg, last_user=last_user, model=MODEL_NAME)
    try:
        rendered = template.format_map(ctx)
    except Exception:
        rendered = conversation
    if "[ASSISTANT]" not in rendered:
        rendered = rendered.rstrip() + "\n[ASSISTANT] "
    return rendered


def _generate(prompt: str, max_new_tokens: int, temperature: float, top_p: float):
    """Run one sampling generation pass.

    Returns:
        (text, prompt_token_count, completion_token_count) where *text* is the
        decoded continuation only (prompt tokens stripped).
    """
    tokenizer, model = _load()
    encoded = tokenizer(prompt, return_tensors="pt").to(model.device)
    prompt_len = encoded["input_ids"].shape[1]
    with torch.no_grad():
        output_ids = model.generate(
            **encoded,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            temperature=temperature,
            top_p=top_p,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Slice off the echoed prompt; decode only the new tokens.
    new_tokens = output_ids[0][prompt_len:]
    completion = tokenizer.decode(new_tokens, skip_special_tokens=True)
    return completion, prompt_len, len(new_tokens)

# ------------------ FastAPI ------------------
app = FastAPI(title="OpenAI Compatible Inference", version="0.1.0")

@app.get("/health")
async def health():
    """Liveness probe; also reports which single model this instance hosts."""
    payload = {"status": "ok", "model": MODEL_NAME}
    return payload

@app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
async def chat_completions(req: ChatCompletionRequest):
    """OpenAI-compatible chat completion (non-streaming).

    Raises 400 for an empty message list, for a model other than the single
    hosted one, or for stream=true (streaming is not implemented yet; see
    module docstring).
    """
    if not req.messages:
        raise HTTPException(400, "messages required")
    if req.stream:
        # Streaming is documented as unsupported; fail fast instead of
        # silently returning a non-streamed body the client cannot parse.
        raise HTTPException(400, "stream=true not supported yet")
    model_name = req.model or MODEL_NAME
    if model_name != MODEL_NAME:
        # For now we only host one model
        raise HTTPException(400, f"Only single hosted model {MODEL_NAME}")
    prompt = _build_chat_prompt(req)
    # `is not None` (not `or`) so explicit 0 / 0.0 request values are honored
    # instead of silently falling back to the server defaults.
    max_new = req.max_tokens if req.max_tokens is not None else MAX_NEW_TOKENS
    temperature = req.temperature if req.temperature is not None else TEMPERATURE
    top_p = req.top_p if req.top_p is not None else TOP_P
    text, prompt_tokens, completion_tokens = _generate(prompt, max_new, temperature, top_p)
    now = int(time.time())
    choice = ChatCompletionChoice(index=0, message=ChatMessage(role="assistant", content=text), finish_reason="stop")
    usage = ChatCompletionUsage(
        prompt_tokens=prompt_tokens,
        completion_tokens=completion_tokens,
        total_tokens=prompt_tokens + completion_tokens,
    )
    return ChatCompletionResponse(id=f"chatcmpl-{uuid.uuid4().hex}", created=now, model=model_name, choices=[choice], usage=usage)

@app.post("/v1/completions", response_model=CompletionResponse)
async def completions(req: CompletionRequest):
    """OpenAI-compatible classic (single-prompt) completion.

    Raises 400 when a model other than the single hosted one is requested.
    """
    model_name = req.model or MODEL_NAME
    if model_name != MODEL_NAME:
        # For now we only host one model
        raise HTTPException(400, f"Only single hosted model {MODEL_NAME}")
    prompt = req.prompt
    # `is not None` (not `or`) so explicit 0 / 0.0 request values are honored
    # instead of silently falling back to the server defaults.
    max_new = req.max_tokens if req.max_tokens is not None else MAX_NEW_TOKENS
    temperature = req.temperature if req.temperature is not None else TEMPERATURE
    top_p = req.top_p if req.top_p is not None else TOP_P
    text, prompt_tokens, completion_tokens = _generate(prompt, max_new, temperature, top_p)
    now = int(time.time())
    choice = CompletionChoice(index=0, text=text, finish_reason="stop")
    usage = ChatCompletionUsage(
        prompt_tokens=prompt_tokens,
        completion_tokens=completion_tokens,
        total_tokens=prompt_tokens + completion_tokens,
    )
    return CompletionResponse(id=f"cmpl-{uuid.uuid4().hex}", created=now, model=model_name, choices=[choice], usage=usage)

# NOTE: Streaming endpoint could be added later using Server-Sent Events.
