|
from fastapi import FastAPI, HTTPException |
|
from pydantic import BaseModel |
|
from typing import List, Dict, Any |
|
import re |
|
import random |
|
import string |
|
from aiohttp import ClientSession |
|
|
|
|
|
class ImageResponse: |
|
def __init__(self, url: str, alt: str): |
|
self.url = url |
|
self.alt = alt |
|
|
|
def to_data_uri(image: Any) -> str: |
|
|
|
return "data:image/png;base64,..." |
|
|
|
class AsyncGeneratorProvider: |
|
pass |
|
|
|
class ProviderModelMixin: |
|
pass |
|
|
|
class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): |
|
url = "https://www.blackbox.ai" |
|
api_endpoint = "https://www.blackbox.ai/api/chat" |
|
default_model = 'blackbox' |
|
models = ['blackbox', 'gemini-1.5-flash', "llama-3.1-8b"] |
|
|
|
@classmethod |
|
async def create_async_generator(cls, model: str, messages: List[Dict[str, str]]) -> Any: |
|
|
|
return {"content": "This is a mock response from the model."} |
|
|
|
app = FastAPI() |
|
|
|
class Message(BaseModel): |
|
role: str |
|
content: str |
|
|
|
class ChatRequest(BaseModel): |
|
model: str |
|
messages: List[Message] |
|
|
|
@app.post("/v1/chat/completions") |
|
async def chat_completions(request: ChatRequest): |
|
messages = [{"role": msg.role, "content": msg.content} for msg in request.messages] |
|
|
|
response = await Blackbox.create_async_generator( |
|
model=request.model, |
|
messages=messages |
|
) |
|
|
|
return { |
|
"id": "chatcmpl-1234", |
|
"object": "chat.completion", |
|
"created": 1690000000, |
|
"model": request.model, |
|
"choices": [ |
|
{ |
|
"message": { |
|
"role": "assistant", |
|
"content": response['content'] |
|
}, |
|
"finish_reason": "stop", |
|
"index": 0 |
|
} |
|
] |
|
} |
|
|