"""
OpenAI-compatible API server for Ministral 14B with streaming support.
Includes a fixed chat template for base models.
"""

import subprocess
import sys


def install_deps():
    """Install runtime dependencies, skipping the torch install when a CUDA-enabled torch is already importable."""
    try:
        import torch
        need_torch = not torch.cuda.is_available()
    except ImportError:
        need_torch = True

    print("=== Installing dependencies ===")

    if need_torch:
        subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "torch"])

    # Install transformers from source (latest development version).
    subprocess.check_call([sys.executable, "-m", "pip", "install", "-q",
                           "git+https://github.com/huggingface/transformers.git"])

    subprocess.check_call([sys.executable, "-m", "pip", "install", "-q",
                           "accelerate", "fastapi", "uvicorn", "pydantic", "sentencepiece", "protobuf"])

    print("=== Dependencies installed ===")


install_deps()
|
|
| import torch |
| from transformers import AutoProcessor, AutoModelForImageTextToText, TextIteratorStreamer |
| from fastapi import FastAPI |
| from fastapi.responses import StreamingResponse |
| from pydantic import BaseModel |
| from typing import List, Optional |
| import uvicorn |
| import time |
| import traceback |
| import json |
| import asyncio |
| from threading import Thread |
|
|
| app = FastAPI() |
|
|
| |


# Fallback Mistral-format chat template, applied when the tokenizer ships without one.
MISTRAL_CHAT_TEMPLATE = """{{- bos_token }}
{%- for message in messages %}
{%- if message['role'] == 'system' %}
{{- '[INST] ' + message['content'] + '\n\n' }}
{%- elif message['role'] == 'user' %}
{%- if loop.index0 == 0 and messages[0]['role'] != 'system' %}
{{- '[INST] ' + message['content'] + ' [/INST]' }}
{%- elif messages[0]['role'] == 'system' and loop.index0 == 1 %}
{{- message['content'] + ' [/INST]' }}
{%- else %}
{{- '[INST] ' + message['content'] + ' [/INST]' }}
{%- endif %}
{%- elif message['role'] == 'assistant' %}
{{- message['content'] + eos_token }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{#- Mistral-style prompts need no generation token: the reply follows '[/INST]' directly. -#}
{%- endif %}"""
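
# Example of the rendered prompt for a system + user turn (illustrative):
#
#   <s>[INST] You are a helpful assistant.
#
#   Hello! [/INST]
#
# The assistant reply is generated directly after "[/INST]"; no extra
# generation-prompt token is appended.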


def fix_bpe_tokens(text):
    """Undo GPT-2 byte-level BPE artifacts that can leak into decoded text."""
    text = text.replace("Ġ", " ")    # word-initial space marker
    text = text.replace("Ċ", "\n")   # newline
    text = text.replace("ĉ", "\t")   # tab
    text = text.replace("âĢĻ", "'")  # right single quote
    text = text.replace("âĢľ", '"')  # left double quote
    text = text.replace("âĢĿ", '"')  # right double quote
    text = text.replace("âĢĶ", "—")  # em dash
    text = text.replace("âĢĵ", "–")  # en dash
    text = text.replace("âĢ¦", "…")  # ellipsis
    text = text.replace("âĢĺ", "'")  # left single quote
    return text
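
# Example (illustrative): fix_bpe_tokens("ItâĢĻsĠfine") -> "It's fine"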


def format_messages_mistral(messages):
    """Format messages in the Mistral instruct format manually (fallback path)."""
    text = "<s>"

    for i, m in enumerate(messages):
        role = m["role"]
        content = m["content"]

        if role == "system":
            # System prompt opens the first [INST] block; the first user turn closes it.
            text += f"[INST] {content}\n\n"
        elif role == "user":
            if i == 0:
                # First turn with no system prompt.
                text += f"[INST] {content} [/INST]"
            elif i > 0 and messages[i-1]["role"] == "system":
                # User turn that follows the system prompt: close the open [INST] block.
                text += f"{content} [/INST]"
            else:
                # Any later user turn gets its own [INST] ... [/INST] block.
                text += f"[INST] {content} [/INST]"
        elif role == "assistant":
            text += f"{content}</s>"

    return text
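
# Example (illustrative):
#   format_messages_mistral([
#       {"role": "user", "content": "Hi"},
#       {"role": "assistant", "content": "Hello!"},
#       {"role": "user", "content": "How are you?"},
#   ])
#   -> "<s>[INST] Hi [/INST]Hello!</s>[INST] How are you? [/INST]"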


model = None
processor = None


class Message(BaseModel):
    role: str
    content: str


class ChatRequest(BaseModel):
    model: str = "ministral-14b"
    messages: List[Message]
    max_tokens: Optional[int] = 2048
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 0.9
    top_k: Optional[int] = None
    min_p: Optional[float] = None
    typical_p: Optional[float] = None
    repetition_penalty: Optional[float] = None
    no_repeat_ngram_size: Optional[int] = None
    stream: Optional[bool] = False
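
# Example request body (illustrative):
# {
#   "model": "ministral-14b",
#   "messages": [{"role": "user", "content": "Hello"}],
#   "max_tokens": 256,
#   "temperature": 0.7,
#   "stream": true
# }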


@app.on_event("startup")
async def load_model():
    global model, processor
    print("Loading Ministral 14B...")

    model_id = "RoleModel/ministral-14b-merged-official"

    processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

    # Base models may ship without a chat template; fall back to the Mistral one.
    if processor.tokenizer.chat_template is None:
        print("Setting Mistral chat template...")
        processor.tokenizer.chat_template = MISTRAL_CHAT_TEMPLATE

    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    print(f"Using device: {device}")
    print(f"CUDA available: {torch.cuda.is_available()}")

    model = AutoModelForImageTextToText.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16,
        device_map=device,
        trust_remote_code=True,
    )
    model.eval()
    print("Model loaded successfully!")


@app.post("/v1/chat/completions")
async def chat_completions(request: ChatRequest):
    global model, processor

    try:
        messages = [{"role": m.role, "content": m.content} for m in request.messages]
        print(f"Processing {len(messages)} messages, stream={request.stream}")

        # Prefer the tokenizer's chat template; fall back to manual Mistral formatting.
        try:
            chat_text = processor.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True
            )
        except Exception as e:
            print(f"Chat template error: {e}, using manual format")
            chat_text = format_messages_mistral(messages)

        print(f"Formatted prompt:\n{chat_text[:500]}...")

        # The chat template (and the manual formatter) already emit the BOS token,
        # so skip add_special_tokens to avoid a duplicated BOS.
        inputs = processor.tokenizer(chat_text, return_tensors="pt", add_special_tokens=False).to(model.device)
        input_len = inputs["input_ids"].shape[1]
        print(f"Input tokens: {input_len}")
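
        # Two response modes: stream=True returns an SSE stream of
        # "chat.completion.chunk" objects; otherwise a single "chat.completion"
        # JSON body is returned.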
        if request.stream:
            async def generate_stream():
                streamer = TextIteratorStreamer(
                    processor.tokenizer,
                    skip_prompt=True,
                    skip_special_tokens=True
                )

                generation_kwargs = {
                    **inputs,
                    "max_new_tokens": request.max_tokens,
                    "temperature": request.temperature if request.temperature and request.temperature > 0 else 1.0,
                    "top_p": request.top_p if request.top_p else 0.9,
                    "do_sample": request.temperature is not None and request.temperature > 0,
                    "pad_token_id": processor.tokenizer.eos_token_id,
                    "streamer": streamer,
                }
                # Optional sampling knobs are only forwarded when the client sets them.
                if request.top_k is not None:
                    generation_kwargs["top_k"] = request.top_k
                if request.min_p is not None:
                    generation_kwargs["min_p"] = request.min_p
                if request.typical_p is not None:
                    generation_kwargs["typical_p"] = request.typical_p
                if request.repetition_penalty is not None:
                    generation_kwargs["repetition_penalty"] = request.repetition_penalty
                if request.no_repeat_ngram_size is not None:
                    generation_kwargs["no_repeat_ngram_size"] = request.no_repeat_ngram_size

                # Run generation in a background thread; the streamer yields tokens as they arrive.
                thread = Thread(target=model.generate, kwargs=generation_kwargs)
                thread.start()

                response_id = f"chatcmpl-{int(time.time())}"

                for text in streamer:
                    if text:
                        text = fix_bpe_tokens(text)
                        chunk = {
                            "id": response_id,
                            "object": "chat.completion.chunk",
                            "created": int(time.time()),
                            "model": request.model,
                            "choices": [{
                                "index": 0,
                                "delta": {"content": text},
                                "finish_reason": None
                            }]
                        }
                        yield f"data: {json.dumps(chunk)}\n\n"
                        # Yield control so uvicorn can flush the chunk immediately.
                        await asyncio.sleep(0)

                final_chunk = {
                    "id": response_id,
                    "object": "chat.completion.chunk",
                    "created": int(time.time()),
                    "model": request.model,
                    "choices": [{
                        "index": 0,
                        "delta": {},
                        "finish_reason": "stop"
                    }]
                }
                yield f"data: {json.dumps(final_chunk)}\n\n"
                yield "data: [DONE]\n\n"

                thread.join()

            return StreamingResponse(
                generate_stream(),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache, no-store, must-revalidate",
                    "Connection": "keep-alive",
                    "X-Accel-Buffering": "no",
                    "Transfer-Encoding": "chunked",
                }
            )
        else:
            generation_kwargs = {
                **inputs,
                "max_new_tokens": request.max_tokens,
                "temperature": request.temperature if request.temperature and request.temperature > 0 else 1.0,
                "top_p": request.top_p if request.top_p else 0.9,
                "do_sample": request.temperature is not None and request.temperature > 0,
                "pad_token_id": processor.tokenizer.eos_token_id,
            }
            if request.top_k is not None:
                generation_kwargs["top_k"] = request.top_k
            if request.min_p is not None:
                generation_kwargs["min_p"] = request.min_p
            if request.typical_p is not None:
                generation_kwargs["typical_p"] = request.typical_p
            if request.repetition_penalty is not None:
                generation_kwargs["repetition_penalty"] = request.repetition_penalty
            if request.no_repeat_ngram_size is not None:
                generation_kwargs["no_repeat_ngram_size"] = request.no_repeat_ngram_size

            with torch.no_grad():
                outputs = model.generate(**generation_kwargs)

            # Decode only the newly generated tokens, not the prompt.
            new_tokens = outputs[0][input_len:]
            response_text = processor.tokenizer.decode(
                new_tokens,
                skip_special_tokens=True,
                clean_up_tokenization_spaces=True
            )
            response_text = fix_bpe_tokens(response_text)
            print(f"Generated {len(new_tokens)} tokens")

            return {
                "id": f"chatcmpl-{int(time.time())}",
                "object": "chat.completion",
                "created": int(time.time()),
                "model": request.model,
                "choices": [{
                    "index": 0,
                    "message": {"role": "assistant", "content": response_text},
                    "finish_reason": "stop"
                }],
                "usage": {
                    "prompt_tokens": input_len,
                    "completion_tokens": len(new_tokens),
                    "total_tokens": input_len + len(new_tokens)
                }
            }
    except Exception as e:
        print(f"Error: {e}")
        traceback.print_exc()
        raise


@app.get("/v1/models")
async def list_models():
    return {
        "object": "list",
        "data": [{"id": "ministral-14b", "object": "model", "owned_by": "rolemodel"}]
    }


@app.get("/health")
async def health():
    return {"status": "ok"}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
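

# Example client calls (illustrative; assumes the server is reachable on localhost:8000):
#
#   curl http://localhost:8000/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "ministral-14b", "messages": [{"role": "user", "content": "Hello"}], "stream": false}'
#
# With the openai Python client (>= 1.0), point base_url at this server; the
# api_key value is arbitrary since this server does not check authentication:
#
#   from openai import OpenAI
#   client = OpenAI(base_url="http://localhost:8000/v1", api_key="not-needed")
#   resp = client.chat.completions.create(
#       model="ministral-14b",
#       messages=[{"role": "user", "content": "Hello"}],
#   )
#   print(resp.choices[0].message.content)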