import json
import os
from datetime import datetime, timezone
from typing import Optional, Dict, Any

import psutil
import uvicorn
from fastapi import FastAPI, Request, Form
from fastapi.responses import HTMLResponse, StreamingResponse, JSONResponse
from pydantic import BaseModel

from lemonade.api import from_pretrained

# Environment hints for the ONNX / lemonade runtime back-ends.
os.environ["ORT_DML_ENABLE"] = "1"  # prefer the AMD GPU (DirectML)
os.environ["OMP_TARGET"] = "NPU"  # NOTE(review): non-standard variable — presumably targets the NPU; confirm
os.environ["LEMONADE_NPU_PREFER"] = "1"  # presumably makes lemonade prefer the NPU back-end — confirm

# ========== 数据模型定义 ==========
class GenerateRequest(BaseModel):
    """Request body for the Ollama-compatible /api/generate endpoint."""
    model: str  # model name; must be a key of models_paths
    prompt: str  # raw prompt text passed to the tokenizer
    stream: bool = False  # True -> response is streamed as NDJSON chunks
    options: Optional[Dict[str, Any]] = None  # reserved; not read anywhere in this file

class GenerateResponse(BaseModel):
    """Response body mirroring Ollama's /api/generate JSON schema.

    NOTE(review): declared but not used as a response_model by any route in
    this file — the handlers build plain dicts instead.
    """
    model: str  # name of the model that produced the text
    created_at: str  # timestamp string of response creation
    response: str  # generated text (one chunk when streaming)
    done: bool  # True on the final message of a stream
    context: Optional[list] = None  # conversation context tokens, if any
    total_duration: Optional[int] = None  # total time (presumably ns, per Ollama convention — confirm)
    load_duration: Optional[int] = None  # model-load time (same unit caveat)
    prompt_eval_count: Optional[int] = None  # number of prompt tokens evaluated
    prompt_eval_duration: Optional[int] = None  # time spent evaluating the prompt
    eval_count: Optional[int] = None  # number of tokens generated
    eval_duration: Optional[int] = None  # time spent generating

# ========== 模型管理部分 ==========
def check_memory(min_available: int = 4 * 1024 * 1024 * 1024):
    """Ensure enough free RAM is available before loading a model.

    Args:
        min_available: minimum required free memory in bytes
            (default 4 GB, matching the previous hard-coded threshold).

    Raises:
        MemoryError: if available system memory is below ``min_available``.
    """
    mem = psutil.virtual_memory()
    print(f"可用内存: {mem.available/1024/1024:.2f} MB")
    if mem.available < min_available:
        # Message reflects the (possibly non-default) threshold.
        raise MemoryError(f"可用内存不足，至少需要{min_available / (1024 ** 3):.0f}GB空闲内存")

def model_load(model_dir: str, recipe: str = "oga-hybrid"):
    """Load a model and tokenizer from a local directory via lemonade.

    Prints the failure and re-raises, so callers can surface the error to
    the client.
    """
    try:
        print(f"加载模型从: {model_dir} 使用配方: {recipe}")
        loaded_model, loaded_tokenizer = from_pretrained(model_dir, recipe=recipe)
    except Exception as err:
        print(f"❌ 加载模型失败: {err}")
        raise
    return loaded_model, loaded_tokenizer

# ===== Helpers used by the web UI =====
def get_tokenizer(model):
    """Create a tokenizer plus a streaming token decoder for *model*.

    NOTE(review): `og` is never imported anywhere in this file, so calling
    this raises NameError. Presumably `import onnxruntime_genai as og` is
    missing — confirm and add it.
    """
    tokenizer = og.Tokenizer(model)
    tokenizer_stream = tokenizer.create_stream()
    return tokenizer, tokenizer_stream

def setup(model, tokenizer, tokenizer_stream, prompt):
    """Build a Generator primed with the encoded *prompt*.

    NOTE(review): `og` and `np` are never imported in this file — calling
    this raises NameError. Presumably `import onnxruntime_genai as og` and
    `import numpy as np` are missing. The `tokenizer_stream` parameter is
    accepted but never used.
    """
    search_options = {'max_length': 2048}  # cap on total sequence length
    input_tokens = tokenizer.encode(prompt)
    params = og.GeneratorParams(model)
    params.set_search_options(**search_options)
    # Create an OgaNamedTensors object with input_ids
    named_tensors = og.NamedTensors()
    named_tensors["input_ids"] = np.array([input_tokens], dtype=np.int64)
    params.set_inputs(named_tensors)  # Set inputs using OgaNamedTensors
    generator = og.Generator(model, params)
    return generator

def get_chat_user_prompt(input: str):
    """Wrap raw user text in the generic <|user|>/<|assistant|> chat template."""
    pieces = ['<|user|>\n', input, ' <|end|>\n<|assistant|>\n']
    return ''.join(pieces)

def get_cot_user_prompt(input: str):
    """Build a chain-of-thought prompt tailored to the currently loaded model.

    Reads the module-level `model_running_name` to pick the template.
    """
    if model_running_name == "Qwen2.5-Math-1.5B":
        template = '<|User|>\n{q}\n<|助理|>\n<think>\n'
    elif model_running_name == "DeepSeek-R1-Distill-Llama-8B":
        template = '<|User|>\n{q}<|Assistant|>\n<think>\n'
    else:
        template = '<|User|>\n{q}<|end|>\n<|Assistant|>\n<think>\n'
    return template.format(q=input)
        
        
        
# ========== Web界面部分 ==========
def get_html_template():
    """Return the control-panel HTML page as a string.

    The page contains two placeholders, ``{{model_status}}`` and
    ``{{model_options}}``, which the "/" route substitutes before serving.
    Its JavaScript talks to /change_model, /model_status and
    /v1/chat/completions.
    """
    return """
    <!DOCTYPE html>
    <html>
    <head>
        <title>LLM-ONNX API Server</title>
        <style>
            body { font-family: Arial, sans-serif; max-width: 800px; margin: 0 auto; padding: 20px; }
            .container { display: flex; flex-direction: column; gap: 20px; }
            .card { border: 1px solid #ddd; border-radius: 8px; padding: 15px; }
            textarea { width: 100%; height: 100px; }
            button { padding: 8px 15px; background: #4CAF50; color: white; border: none; border-radius: 4px; cursor: pointer; }
            button:hover { background: #45a049; }
            #response { white-space: pre-wrap; border: 1px solid #eee; padding: 10px; min-height: 100px; }
            .model-info { background: #f5f5f5; padding: 10px; }
        </style>
    </head>
    <body>
        <div class="container">
            <h1>LLM-ONNX API Server v0.1</h1>
            
            <div class="card">
                <h2>当前模型状态</h2>
                <div class="model-info" id="model-info">
                    {{model_status}}
                </div>
            </div>
            
            <div class="card">
                <h2>切换模型</h2>
                <form action="/change_model" method="post" onsubmit="event.preventDefault(); changeModel();">
                    <select name="model_name" id="model-select">
                        {{model_options}}
                    </select>
                    <button type="submit">切换模型</button>
                </form>
            </div>
            
            <div class="card">
                <h2>与模型对话</h2>
                <form onsubmit="event.preventDefault(); sendMessage();">
                    <textarea id="user-input" placeholder="输入你的问题..."></textarea>
                    <button type="submit">发送</button>
                </form>
                <h3>模型回复:</h3>
                <div id="response"></div>
            </div>
            
            <div class="card">
                <h2>API文档</h2>
                <p><a href="/docs" target="_blank">查看Swagger API文档</a></p>
                <p><a href="/redoc" target="_blank">查看ReDoc API文档</a></p>
            </div>
        </div>
        
        <script>
            async function changeModel() {
                const modelName = document.getElementById('model-select').value;
                const response = await fetch('/change_model', {
                    method: 'POST',
                    headers: { 'Content-Type': 'application/json' },
                    body: JSON.stringify({ model_name: modelName })
                });
                const data = await response.json();
                updateModelStatus();
                alert(data.response || data.error);
            }
            
            async function sendMessage() {
                const userInput = document.getElementById('user-input').value;
                const responseDiv = document.getElementById('response');
                responseDiv.innerHTML = "模型正在思考...";
                
                try {
                    const response = await fetch('/v1/chat/completions', {
                        method: 'POST',
                        headers: { 'Content-Type': 'application/json' },
                        body: JSON.stringify({ input: userInput })
                    });
                    
                    if (response.body) {
                        const reader = response.body.getReader();
                        const decoder = new TextDecoder();
                        responseDiv.innerHTML = "";
                        
                        while (true) {
                            const { done, value } = await reader.read();
                            if (done) break;
                            responseDiv.innerHTML += decoder.decode(value);
                            responseDiv.scrollTop = responseDiv.scrollHeight;
                        }
                    }
                } catch (error) {
                    responseDiv.innerHTML = "错误: " + error;
                }
            }
            
            async function updateModelStatus() {
                const response = await fetch('/model_status');
                const data = await response.json();
                document.getElementById('model-info').innerHTML = `
                    <strong>当前模型:</strong> ${data.model_name || '未加载'}<br>
                    <strong>状态:</strong> ${data.status}
                `;
            }
            
            // 初始化页面时加载模型状态
            document.addEventListener('DOMContentLoaded', updateModelStatus);
        </script>
    </body>
    </html>
    """





# ========== FastAPI应用部分 ==========
app = FastAPI()

# Directory containing this file; all model paths below are relative to it.
# (Renamed from `dir`, which shadowed the builtin of the same name.)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Model map: Ollama-style model name -> local ONNX model directory.
# Paths use Windows separators; this server targets Windows/Ryzen AI.
models_paths = {
    "deepseek-r1:1.5b": f"{BASE_DIR}\\models\\amd\\DeepSeek-R1-Distill-Qwen-1___5B-awq-asym-uint4-g128-lmhead-onnx-hybrid",
    "deepseek-r1:7b": f"{BASE_DIR}\\models\\amd\\DeepSeek-R1-Distill-Qwen-7B-awq-g128-int4-asym-bf16-onnx-ryzen-strix",
    "deepseek-r1:8b": f"{BASE_DIR}\\models\\amd\\DeepSeek-R1-Distill-Llama-8B-awq-g128-int4-asym-bf16-onnx-ryzen-strix",
    "mistral:7b": f"{BASE_DIR}\\models\\LLM-Research\\mistral-7b-instruct-v0___2-ONNX\\onnx\\directml\\mistralai_Mistral-7B-Instruct-v0.2",
    "llama2:7b": f"{BASE_DIR}\\models\\amd\\Llama-2-7b-chat-hf-awq-g128-int4-asym-fp16-onnx-hybrid",
    "hybrid-8B-ds": f"{BASE_DIR}\\models\\amd\\DeepSeek-R1-Distill-Llama-8B-awq-asym-uint4-g128-lmhead-onnx-hybrid",
    "ryzenAI-1.5-8B-ds": f"{BASE_DIR}\\models\\deepseek-ai\\DeepSeek-R1-Distill-Llama-8B",
}

print(f"可用的模型映射: {models_paths}")

# Globals holding the currently loaded model state (mutated by the routes).
model = None
tokenizer = None
tokenizer_stream = None
model_running_name = None


# ========== API路由部分 ==========
@app.get("/", response_class=HTMLResponse)
async def root():
    """Serve the control-panel page with current model state substituted in."""
    # Build the <option> list, pre-selecting the model that is loaded.
    options = []
    for name in models_paths:
        selected = "selected" if name == model_running_name else ""
        options.append(f'<option value="{name}" {selected}>{name}</option>')

    status_html = (
        f"<strong>当前模型:</strong> {model_running_name or '未加载'}<br>"
        f"<strong>状态:</strong> {'已加载' if model else '未加载'}"
    )

    page = get_html_template()
    page = page.replace("{{model_options}}", "".join(options))
    page = page.replace("{{model_status}}", status_html)
    return HTMLResponse(content=page)

@app.get("/model_status")
async def get_model_status():
    """Report which model (if any) is currently loaded."""
    status = "loaded" if model else "not loaded"
    return {"model_name": model_running_name, "status": status}

@app.post("/change_model")
async def change_model(model_config: dict):
    """Switch the active model to ``model_config["model_name"]``.

    Frees the previously loaded model first, then loads the new one and
    rebuilds the tokenizer pair. Returns a ``{"response": ...}`` dict on
    success or ``{"error": ...}`` on failure (never raises to the client).
    """
    model_name = model_config.get("model_name")
    if model_name not in models_paths:
        return {"error": f"模型不支持，仅支持: {list(models_paths.keys())}"}

    global model, tokenizer, model_running_name, tokenizer_stream

    try:
        check_memory()
        model_path = models_paths[model_name]

        # Release the previous model by rebinding to None — NOT `del`:
        # deleting the global would leave the name undefined if the new
        # load below fails, making later `model is None` checks raise
        # NameError instead of reporting "not loaded".
        if model is not None:
            model = None
            tokenizer = None
            tokenizer_stream = None
            import gc
            gc.collect()

        model, tokenizer = model_load(model_path)
        model_running_name = model_name
        # NOTE(review): this overwrites the lemonade tokenizer with an
        # og-based one; get_tokenizer depends on the unimported `og` —
        # confirm that import exists before relying on this path.
        tokenizer, tokenizer_stream = get_tokenizer(model)
        return {"response": "模型切换成功!"}
    except Exception as e:
        # Keep the globals in a consistent "nothing loaded" state after a
        # failed switch, so status endpoints and guards stay truthful.
        model = None
        tokenizer = None
        tokenizer_stream = None
        model_running_name = None
        return {"error": f"加载模型失败: {str(e)}"}
        
# ===== Ollama-compatible API =====

@app.post("/api/generate")
async def generate(request: GenerateRequest):
    """Ollama-compatible generation endpoint (/api/generate).

    Loads (or switches to) the requested model on demand, runs one blocking
    generation pass, and returns either a single JSON object or an NDJSON
    stream of chunks mirroring Ollama's wire format.

    Fixes vs. previous revision: `json` was used but never imported
    (NameError in stream mode); `request.is_disconnected()` was called on
    the pydantic GenerateRequest, which has no such method (AttributeError
    on the first chunk); `del model` could leave the global undefined after
    a failed load; `created_at` is now a real UTC timestamp instead of a
    hard-coded placeholder.
    """
    global model, tokenizer, model_running_name

    # Switch models when the request names a different one than is loaded.
    if model_running_name != request.model:
        if request.model not in models_paths:
            return JSONResponse(
                status_code=404,
                content={"error": f"模型 '{request.model}' 未找到"}
            )

        try:
            check_memory()
            model_path = models_paths[request.model]

            # Release the previous model by rebinding to None (not `del`)
            # and prompt a GC pass before the large allocation.
            if model is not None:
                model = None
                tokenizer = None
                import gc
                gc.collect()

            model, tokenizer = model_load(model_path)
            model_running_name = request.model
        except Exception as e:
            return JSONResponse(
                status_code=500,
                content={"error": f"加载模型失败: {str(e)}"}
            )

    # One full (blocking) generation pass; streaming below only re-chunks
    # the already-complete text.
    input_ids = tokenizer(request.prompt, return_tensors="pt").input_ids
    response = model.generate(input_ids, max_new_tokens=2048)
    decoded_text = tokenizer.decode(response[0])

    created_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ")

    if request.stream:
        async def stream_response():
            # Emit the text in 100-char NDJSON chunks.
            for i in range(0, len(decoded_text), 100):
                yield json.dumps({
                    "model": request.model,
                    "created_at": created_at,
                    "response": decoded_text[i:i + 100],
                    "done": False
                }) + "\n"

            # Final marker with (placeholder) timing stats.
            yield json.dumps({
                "model": request.model,
                "created_at": created_at,
                "response": "",
                "done": True,
                "total_duration": 1000,  # placeholder; real timing not measured
                "eval_count": len(response[0]),
            }) + "\n"

        return StreamingResponse(
            stream_response(),
            media_type="application/x-ndjson"
        )
    else:
        return {
            "model": request.model,
            "created_at": created_at,
            "response": decoded_text,
            "done": True,
            "total_duration": 1000,  # placeholder; real timing not measured
            "eval_count": len(response[0]),
        }

# 保持原有的聊天端点兼容性
@app.post("/v1/chat/completions")
async def chat(request: Request):
    if model is None:
        return {"error": "请先加载模型"}
    
    data = await request.json()
    ollama_request = GenerateRequest(
        model=data.get("model", model_running_name or list(models_paths.keys())[0]),
        prompt=data["input"],
        stream=data.get("stream", False)
    )
    return await generate(ollama_request)

# 模型列表端点
@app.get("/api/tags")
async def list_models():
    return {
        "models": [
            {
                "name": name,
                "modified_at": "2023-01-01T00:00:00.000000Z",
                "size": 0,  # 可以添加实际大小
                "digest": "sha256:xxxx"  # 可以添加实际digest
            }
            for name in models_paths.keys()
        ]
    }

# 健康检查端点
@app.get("/healthy")
async def healthy():
    """Liveness probe: always reports healthy."""
    return dict(status="healthy")

if __name__ == "__main__":
    # Bind to localhost only; change host to "0.0.0.0" to expose on the LAN.
    uvicorn.run(app, host="127.0.0.1", port=9090)
