# api_server.py
# 注：此文件假定在 Apple Silicon (如 mbaM4) 环境下运行，
# 并已通过 requirements_mlx.txt 安装了 mlx 相关的依赖。
# 它作为独立的 LLM API 服务，供 test_generator.py 远程调用。

import argparse
import json
import logging
import threading
import time
from http.server import BaseHTTPRequestHandler, HTTPServer, ThreadingHTTPServer

from mlx_lm import load, generate

# Configure logging: timestamp - level - message on every record.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Global model and tokenizer, populated by load_model().  Access to the
# model during generation is serialized through model_lock.
model = None
tokenizer = None
model_lock = threading.Lock()
model_loaded_successfully = False # tracks whether load_model() succeeded

def load_model(model_path):
    """Load the MLX model and tokenizer into the module-level globals.

    Side effects: sets ``model``, ``tokenizer`` and
    ``model_loaded_successfully``; never raises — failures are logged and
    recorded in the flag so the HTTP handlers can report 503.

    Args:
        model_path: Local path (or hub id) of the MLX model to load.

    Returns:
        bool: True when the model loaded successfully, False otherwise.
        (Backward-compatible: existing callers ignore the return value and
        keep reading ``model_loaded_successfully``.)
    """
    global model, tokenizer, model_loaded_successfully
    # Lazy %-style logging args avoid formatting work when the level is off.
    logger.info("[INFO] 尝试加载 MLX 模型: %s", model_path)
    try:
        model, tokenizer = load(model_path)
    except ImportError as e:
        # load() may import platform-specific pieces lazily, so an
        # ImportError can still surface here even after the top-level import.
        model_loaded_successfully = False
        logger.critical("❌ 无法导入 mlx_lm 库: %s. 请确保运行在 Apple Silicon 环境，且已通过 requirements_mlx.txt 安装 mlx 库。", e)
        logger.critical("LLM API 服务将无法提供生成功能。")
    except Exception as e:
        model_loaded_successfully = False
        logger.critical("❌ 加载 MLX 模型 %s 失败: %s. LLM API 服务将无法提供生成功能。", model_path, e)
    else:
        # Only mark success once load() returned without raising.
        model_loaded_successfully = True
        logger.info("[INFO] MLX 模型加载完成。")
    return model_loaded_successfully

class APIHandler(BaseHTTPRequestHandler):
    """Minimal OpenAI-compatible HTTP handler backed by the global MLX model.

    Endpoints:
        POST /v1/completions -- text completion (OpenAI "completions" shape)
        GET  /v1/models      -- static single-model listing
        GET  /health         -- liveness plus model-load status
    """

    def _set_headers(self, status_code=200, content_type='application/json'):
        # Emit the status line, a Content-Type header, and terminate headers.
        self.send_response(status_code)
        self.send_header('Content-Type', content_type)
        self.end_headers()

    def _write_json(self, status_code, payload):
        # Serialize `payload` and send it as the complete JSON response body.
        self._set_headers(status_code)
        self.wfile.write(json.dumps(payload).encode())

    def do_POST(self):
        """Handle POST /v1/completions; everything else is 404."""
        if self.path != "/v1/completions":
            self._write_json(404, {"error": "Not Found"})
            return

        # A missing or non-numeric Content-Length must not crash the handler
        # (the original raised KeyError/ValueError here).
        try:
            content_length = int(self.headers.get('Content-Length', 0))
        except (TypeError, ValueError):
            content_length = 0
        body = self.rfile.read(content_length)

        # Malformed JSON is a client error (400), not a server traceback.
        try:
            data = json.loads(body)
        except (json.JSONDecodeError, UnicodeDecodeError):
            self._write_json(400, {"error": "Request body is not valid JSON."})
            return

        prompt = data.get("prompt", "")
        max_tokens = data.get("max_tokens", 100)

        # If the model never loaded, report Service Unavailable up front.
        if not model_loaded_successfully:
            logger.error("LLM API 调用失败: MLX 模型未加载或加载失败。")
            self._write_json(503, {"error": "MLX LLM model not loaded or failed to load."})
            return

        with model_lock:  # generation on the shared model must be serialized
            try:
                response = generate(model, tokenizer, prompt, max_tokens=max_tokens)
            except Exception as e:
                logger.error(f"MLX generate 调用失败: {e}", exc_info=True)
                self._write_json(500, {"error": str(e)})
                return

        # Tokenize each string once; the original encoded prompt and
        # completion twice apiece just to fill the usage block.
        prompt_tokens = len(tokenizer.encode(prompt))
        completion_tokens = len(tokenizer.encode(response))
        self._write_json(200, {
            "id": f"gen-{int(time.time())}",
            "object": "text_completion",
            "created": int(time.time()),
            "model": "phi-3-mini",  # fixed to the one model this server hosts
            "choices": [{
                "index": 0,
                "text": response,
                "logprobs": None,
                "finish_reason": "length"
            }],
            "usage": {
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": prompt_tokens + completion_tokens
            }
        })

    def do_GET(self):
        """Handle GET /v1/models and GET /health; everything else is 404."""
        if self.path == "/v1/models":
            self._write_json(200, {
                "data": [{
                    "id": "phi-3-mini",
                    "object": "model",
                    "owned_by": "microsoft"
                }],
                "object": "list"
            })
        elif self.path == "/health":
            # Health reflects whether the model actually loaded.
            status = "ok" if model_loaded_successfully else "model_not_loaded"
            self._write_json(200, {"status": status})
        else:
            self._write_json(404, {"error": "Not Found"})

def run_server(host="127.0.0.1", port=8000, model_path="./phi3-mini"):
    """Load the model, then serve the OpenAI-compatible API until interrupted.

    Args:
        host: Interface to bind.
        port: TCP port to listen on.
        model_path: Local MLX model path forwarded to load_model().
    """
    load_model(model_path)  # best effort; /health reports the outcome
    # ThreadingHTTPServer handles each request on its own thread; generation
    # itself is serialized by model_lock in the handler.  The plain
    # HTTPServer is single-threaded, which made that lock pointless and let
    # one slow generation block even /health checks.
    server = ThreadingHTTPServer((host, port), APIHandler)
    logger.info(f"OpenAI 兼容 API 服务启动在 http://{host}:{port}/v1")
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        logger.info("服务器已关闭。")
    finally:
        # Always release the listening socket, even on unexpected errors
        # (the original only closed it on KeyboardInterrupt).
        server.server_close()

if __name__ == "__main__":
    # CLI entry point: parse the serving options, then launch the API server.
    cli_parser = argparse.ArgumentParser()
    cli_parser.add_argument("--model", type=str, default="./phi3-mini", help="本地 MLX 模型路径")
    cli_parser.add_argument("--host", type=str, default="127.0.0.1", help="API 服务监听地址")
    cli_parser.add_argument("--port", type=int, default=8000, help="API 服务监听端口")
    cli_args = cli_parser.parse_args()

    run_server(host=cli_args.host, port=cli_args.port, model_path=cli_args.model)
