import hmac
import json
import os

import requests
from dotenv import load_dotenv
from flask import (
    Flask,
    Response,
    abort,
    jsonify,
    render_template,
    request,
    send_from_directory,
)
from flask_cors import CORS
from jinja2 import TemplateNotFound

# Load environment variables from a local .env file, if present.
load_dotenv()

# Base URL of the upstream Ollama server this app proxies to.
OLLAMA_BASE = os.getenv("OLLAMA_BASE_URL", "http://127.0.0.1:11434")
# Optional shared secret for the API routes; empty string disables auth entirely.
API_KEY = os.getenv("API_KEY", "")
# Flask debug mode is enabled only when DEBUG=true (case-insensitive).
DEBUG = os.getenv("DEBUG", "false").lower() == "true"
# Default model name used when a request body does not specify one.
MODEL = os.getenv("MODEL", "llama3.2:1b")

app = Flask(__name__, template_folder="templates", static_folder="static")
# Allow cross-origin browser requests to the API routes from any origin.
CORS(app, resources={r"/api/*": {"origins": "*"}})


def check_auth(req):
    """Return True when *req* carries a valid ``API-Key`` header.

    When no API_KEY is configured, authentication is disabled and every
    request is accepted.
    """
    if not API_KEY:
        return True
    # Default a missing header to "" so compare_digest always gets a str,
    # and use a constant-time comparison to avoid leaking key prefixes
    # through response-timing differences.
    provided = req.headers.get("API-Key") or ""
    return hmac.compare_digest(provided, API_KEY)


@app.route("/")
def index():
    """Serve the single-page UI from the templates folder."""
    page = "index.html"
    return render_template(page)


@app.route("/healthz")
def health():
    """Liveness probe: report whether the upstream Ollama server responds.

    Returns ``{"ok": true}`` only when ``GET /api/tags`` answers 200 within
    a short timeout.
    """
    try:
        r = requests.get(f"{OLLAMA_BASE}/api/tags", timeout=2)
        ok = r.status_code == 200
    except requests.RequestException:
        # Connection errors and timeouts simply mean "unhealthy"; only
        # network-level failures are expected here, so catch narrowly.
        ok = False
    return jsonify({"ok": ok})


# List the models available on the Ollama server.
@app.route("/api/models", methods=["GET"])
def list_models():
    """Proxy ``GET /api/tags`` from Ollama, passing its JSON and status through.

    Returns 401 when the API key check fails, 502 when the upstream server
    is unreachable.
    """
    if not check_auth(request):
        return jsonify({"error": "unauthorized"}), 401
    try:
        r = requests.get(f"{OLLAMA_BASE}/api/tags", timeout=10)
    except requests.RequestException as e:
        # Surface an explicit gateway error instead of an unhandled 500.
        return jsonify({"error": str(e)}), 502
    return jsonify(r.json()), r.status_code


# Non-streaming chat completion.
@app.route("/api/chat", methods=["POST"])
def chat():
    """Proxy a single (non-streaming) completion to Ollama.

    Accepts either an OpenAI-style ``messages`` list (forwarded to
    ``/api/chat``) or a plain ``prompt`` with an optional ``system`` string
    (forwarded to ``/api/generate``).  Passes Ollama's JSON body and status
    code through; returns 401 on a failed key check and 502 when the
    upstream server is unreachable.
    """
    if not check_auth(request):
        return jsonify({"error": "unauthorized"}), 401

    data = request.get_json(force=True, silent=True) or {}
    model = data.get("model", MODEL)
    prompt = data.get("prompt", "")
    system = data.get("system", "")
    options = data.get("options", {})  # e.g. {"temperature": 0.7, "num_ctx": 2048}
    messages = data.get("messages")  # optional: OpenAI-style messages

    # Prefer chat-style messages when provided.
    if messages:
        endpoint = f"{OLLAMA_BASE}/api/chat"
        payload = {
            "model": model,
            "messages": messages,
            "options": options,
            "stream": False,
        }
    else:
        endpoint = f"{OLLAMA_BASE}/api/generate"
        # Fold the system text into the prompt using <<SYS>> markers.
        full_prompt = prompt if not system else f"<<SYS>>\n{system}\n<</SYS>>\n{prompt}"
        payload = {
            "model": model,
            "prompt": full_prompt,
            "options": options,
            "stream": False,
        }

    try:
        r = requests.post(endpoint, json=payload, timeout=600)
    except requests.RequestException as e:
        # Surface an explicit gateway error instead of an unhandled 500.
        return jsonify({"error": str(e)}), 502
    return jsonify(r.json()), r.status_code


# Streaming chat (SSE).
@app.route("/api/chat/stream", methods=["POST"])
def chat_stream():
    """Stream a completion from Ollama to the client as Server-Sent Events.

    Each NDJSON line from the upstream stream is re-emitted as a ``data:``
    event.  Upstream HTTP errors and network failures are reported as
    ``error`` events, and a final ``done`` event is always sent so clients
    can close the stream cleanly.
    """
    if not check_auth(request):
        return jsonify({"error": "unauthorized"}), 401

    data = request.get_json(force=True, silent=True) or {}
    model = data.get("model", MODEL)
    prompt = data.get("prompt", "")
    system = data.get("system", "")
    options = data.get("options", {})
    messages = data.get("messages")

    # Choose chat vs. generate depending on whether messages are present.
    if messages:
        endpoint = f"{OLLAMA_BASE}/api/chat"
        payload = {
            "model": model,
            "messages": messages,
            "options": options,
            "stream": True,
        }
    else:
        endpoint = f"{OLLAMA_BASE}/api/generate"
        # Fold the system text into the prompt using <<SYS>> markers.
        full_prompt = prompt if not system else f"<<SYS>>\n{system}\n<</SYS>>\n{prompt}"
        payload = {
            "model": model,
            "prompt": full_prompt,
            "options": options,
            "stream": True,
        }

    # Log only the endpoint and model: the full payload contains user
    # conversation content and should not be written to logs.  Lazy %s args
    # avoid formatting when the log level is disabled.
    app.logger.info("[chat_stream] endpoint=%s model=%s", endpoint, model)

    def event_stream():
        try:
            with requests.post(endpoint, json=payload, stream=True, timeout=600) as r:
                # Non-200 upstream status: emit a single error event and stop.
                if r.status_code != 200:
                    app.logger.error(
                        "[chat_stream] error %s: %s", r.status_code, r.text
                    )
                    yield f"event: error\ndata: {json.dumps({'status': r.status_code, 'error': r.text}, ensure_ascii=False)}\n\n"
                    return

                # Normal streaming path: one NDJSON object per line.
                for line in r.iter_lines(decode_unicode=True):
                    if not line:
                        continue
                    try:
                        obj = json.loads(line)
                    except ValueError as e:
                        # Skip malformed lines rather than aborting the stream.
                        app.logger.warning(
                            "[chat_stream] JSON decode error: %s, line=%s", e, line
                        )
                        continue
                    yield f"data: {json.dumps(obj, ensure_ascii=False)}\n\n"

        except Exception as e:
            # Generator boundary: catch network/requests failures, log with
            # traceback, and report them to the client as an error event.
            app.logger.exception("[chat_stream] exception: %s", e)
            yield f"event: error\ndata: {json.dumps({'exception': str(e)}, ensure_ascii=False)}\n\n"

        # Always terminate the stream with a done event.
        yield "event: done\ndata: {}\n\n"

    return Response(event_stream(), mimetype="text/event-stream")


# Embeddings endpoint (optional).
@app.route("/api/embeddings", methods=["POST"])
def embeddings():
    """Proxy an embedding request to Ollama's ``/api/embeddings``.

    The request body's ``input`` field is forwarded as Ollama's ``prompt``
    parameter.  Returns 401 on a failed key check and 502 when the upstream
    server is unreachable.
    """
    if not check_auth(request):
        return jsonify({"error": "unauthorized"}), 401
    data = request.get_json(force=True, silent=True) or {}
    model = data.get("model", "nomic-embed-text")
    input_text = data.get("input", "")
    try:
        r = requests.post(
            f"{OLLAMA_BASE}/api/embeddings",
            json={"model": model, "prompt": input_text},
            timeout=120,
        )
    except requests.RequestException as e:
        # Surface an explicit gateway error instead of an unhandled 500.
        return jsonify({"error": str(e)}), 502
    return jsonify(r.json()), r.status_code


# Fallback route: serve any remaining path as a same-named template.
@app.route("/<path:path>")
def catch_all(path):
    """Render ``templates/<path>.html``, falling back to the 404 page.

    NOTE(review): relies on Jinja's loader rejecting paths that escape the
    template folder (e.g. ``..`` traversal) — confirm for the deployed
    Jinja version.
    """
    try:
        return render_template(f"{path}.html")
    except TemplateNotFound:
        # Catch only "template missing" so genuine rendering errors still
        # surface as 500s, and return the 404 page with a 404 status code
        # rather than 200 so clients and crawlers see the miss.
        return render_template("404.html"), 404


if __name__ == "__main__":
    # Bind address and port come from the environment.
    # NOTE(review): the 0.0.0.0 default exposes the server on all
    # interfaces — confirm this is intended outside a container.
    host = os.getenv("HOST", "0.0.0.0")
    port = int(os.getenv("PORT", "5000"))
    # threaded=True lets the dev server handle requests concurrently,
    # e.g. serving an SSE stream while other requests arrive.
    app.run(host=host, port=port, debug=DEBUG, threaded=True)
