# backend/llm.py
import os
import json
import logging
import requests
import numpy as np
from sentence_transformers import SentenceTransformer
from db import redis_client

logger = logging.getLogger("llm")

# Sentence-transformers model used to embed prompts for the semantic cache tier.
EMBED_MODEL = os.getenv("EMBED_MODEL", "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")
# Minimum cosine similarity (dot product of normalized vectors) for a semantic cache hit.
SIM_THRESHOLD = float(os.getenv("SIM_THRESHOLD", "0.75"))
DEEPSEEK_API = os.getenv("DEEPSEEK_API", "http://localhost:11434")  # TODO: replace with your LLM endpoint

# Lazily-initialized module-level SentenceTransformer instance (see get_embedder).
_embedder = None

def get_embedder():
    """Return the process-wide embedding model, loading it on first use.

    The SentenceTransformer load is expensive, so it is deferred until the
    first call and cached in the module-level ``_embedder`` global.
    """
    global _embedder
    if _embedder is not None:
        return _embedder
    logger.info("Loading embedding model...")
    _embedder = SentenceTransformer(EMBED_MODEL)
    return _embedder

def embed_text(text: str) -> np.ndarray:
    """Embed *text* into a single unit-norm float32 vector.

    Vectors are L2-normalized by the encoder, so downstream dot products
    approximate cosine similarity.
    """
    model = get_embedder()
    encoded = model.encode([text], normalize_embeddings=True)
    return np.asarray(encoded[0], dtype=np.float32)

def hash_text(text: str) -> str:
    """Return the SHA-1 hex digest of *text* (UTF-8), used as a cache-key suffix.

    Note: SHA-1 is fine here because the digest is a cache key, not a
    security boundary.
    """
    import hashlib
    digest = hashlib.sha1(text.encode("utf-8"))
    return digest.hexdigest()

def cache_search(text: str):
    """Look up a cached response for *text* in two tiers.

    Tier 1 (exact): string value stored under ``resp:<sha1(text)>``.
    Tier 2 (semantic): linear scan over the newest 200 entries of the
    ``emb_list`` Redis list; dot product of normalized vectors approximates
    cosine similarity, hit when the best score reaches SIM_THRESHOLD.

    Returns:
        ``{"hit": False}`` on miss, otherwise
        ``{"hit": True, "type": "exact"|"semantic", "response": str, "score": float}``.

    Never raises: Redis/model failures are logged and treated as misses.
    """
    # Tier 1: exact match on the hashed prompt.
    try:
        key = f"resp:{hash_text(text)}"
        val = redis_client.get(key)
        if val:
            return {"hit": True, "type": "exact", "response": val.decode("utf-8"), "score": 1.0}
    except Exception:
        logger.exception("redis exact get failed")

    # Tier 2: naive semantic scan (demo-quality; a real deployment would use
    # a proper vector index instead of LINDEX in a loop).
    try:
        qv = embed_text(text)
        best_row = None
        best_score = -1.0
        llen = redis_client.llen("emb_list")
        start = max(0, llen - 200)
        for i in range(start, llen):
            raw = redis_client.lindex("emb_list", i)
            if not raw:
                continue
            try:
                row = json.loads(raw.decode("utf-8"))
            except (ValueError, UnicodeDecodeError):
                # One corrupt row must not abort the whole scan
                # (previously this exception killed the semantic tier).
                logger.warning("skipping corrupt emb_list row %d", i)
                continue
            vec_hex = row.get("vector")
            if not vec_hex:
                continue
            vec = np.frombuffer(bytes.fromhex(vec_hex), dtype=np.float32)
            # Skip vectors written by a different embedding model: a dimension
            # mismatch would make np.dot raise and abort the scan.
            if vec.shape[0] != qv.shape[0]:
                continue
            # Dot product of normalized vectors == cosine similarity.
            score = float(np.dot(qv, vec))
            if score > best_score:
                best_score = score
                best_row = row
        if best_row and best_score >= SIM_THRESHOLD and "response" in best_row:
            return {"hit": True, "type": "semantic", "response": best_row["response"], "score": float(best_score)}
    except Exception:
        logger.exception("semantic search failed")

    return {"hit": False}

def cache_put(text: str, response: str):
    """Store *response* for *text* in both cache tiers; best-effort, never raises.

    Appends an embedding row to the ``emb_list`` Redis list (for semantic
    lookup) and sets the exact-match key ``resp:<sha1(text)>`` with a
    one-week TTL. NOTE: ``emb_list`` is never trimmed here and grows
    without bound.
    """
    try:
        vec = embed_text(text)
        entry = json.dumps({
            "id": os.urandom(8).hex(),
            "text": text,
            "response": response,
            "vector": vec.tobytes().hex(),
            "dim": int(vec.shape[0]),
        })
        redis_client.rpush("emb_list", entry.encode("utf-8"))
        redis_client.set(f"resp:{hash_text(text)}", response.encode("utf-8"), ex=7 * 24 * 3600)
    except Exception:
        logger.exception("cache_put failed")

def split_into_chunks(text: str, chunk_size: int = 80):
    """Split *text* into consecutive substrings of at most *chunk_size* characters.

    The last chunk may be shorter; an empty *text* yields ``[]``.

    Raises:
        ValueError: if *chunk_size* < 1. (Previously a negative size made the
        range step negative, silently returning ``[]`` and dropping the text.)
    """
    if chunk_size < 1:
        raise ValueError(f"chunk_size must be >= 1, got {chunk_size}")
    return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]

def llm_stream_generate(prompt: str):
    """Yield response chunks for *prompt* as dicts ``{"text": str, "cached": bool}``.

    Flow:
      1. Check the cache; on a hit, yield the full cached response once.
      2. Otherwise POST to ``DEEPSEEK_API/generate`` (single shot), then yield
         the reply in 80-char chunks to simulate streaming.
      3. Cache the reply — but only on success.

    Replace the single-shot POST with a real streaming call if available.
    """
    cached = cache_search(prompt)
    if cached.get("hit"):
        yield {"text": cached["response"], "cached": True}
        return

    ok = False  # True only when the LLM actually produced a reply
    try:
        # TODO: replace with your actual LLM streaming API if available
        url = f"{DEEPSEEK_API}/generate"
        payload = {"model": "deepseek-chat", "prompt": prompt}
        r = requests.post(url, json=payload, timeout=30)
        if r.status_code == 200:
            full = r.json().get("response") or r.text
            ok = True
        else:
            full = f"（LLM 请求失败 status={r.status_code}）"
    except Exception:
        logger.exception("LLM request failed")
        full = "（LLM 调用异常）"

    # Simulated streaming: emit the reply in fixed-size chunks.
    for chunk in split_into_chunks(full, 80):
        yield {"text": chunk, "cached": False}

    # BUG FIX: previously the error placeholder was cached too, so every
    # later identical prompt was served the stale failure message until the
    # key expired. Only cache genuine completions. (cache_put never raises.)
    if ok:
        cache_put(prompt, full)
