File size: 2,642 Bytes
ac94146
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
import asyncio
import uuid
from datetime import datetime

import requests
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

app = FastAPI()

class ChatRequest(BaseModel):
    """Request body for the OpenAI-style /v1/chat/completions endpoint."""

    # Model name; only echoed back in the response, never forwarded upstream.
    model: str
    # Chat history; presumably OpenAI-style {"role": ..., "content": ...}
    # dicts — TODO confirm against callers (left as a bare `list` so Pydantic
    # validation behavior is unchanged).
    messages: list

def _extract_user_prompt(messages: list) -> str:
    """Return the content of the most recent user message, or "" if none.

    Malformed entries (non-dict items, missing keys, null content) are
    skipped instead of raising, since `ChatRequest.messages` is an
    unvalidated `list`.
    """
    for msg in reversed(messages):
        if isinstance(msg, dict) and msg.get("role") == "user":
            return msg.get("content") or ""
    return ""


def _call_blackbox(user_prompt: str) -> str:
    """Synchronously query blackbox.ai and return its "markdown" field.

    Raises HTTPException(502) when the upstream request fails, returns an
    HTTP error status, or responds with a body that is not JSON.
    """
    url = "https://www.blackbox.ai/api/image-generator"
    payload = {
        "query": user_prompt,
        "agentMode": True
    }
    # Headers replicated from the original cURL capture; the upstream
    # endpoint apparently expects text/plain even for a JSON body, so the
    # explicit Content-Type deliberately overrides the one `json=` sets.
    headers = {
        "sec-ch-ua-full-version-list": "\"Google Chrome\";v=\"131.0.6778.266\", \"Chromium\";v=\"131.0.6778.266\", \"Not_A Brand\";v=\"24.0.0.0\"",
        "sec-ch-ua-platform": "\"Windows\"",
        "Referer": "https://www.blackbox.ai/agent/create/new",
        "sec-ch-ua": "\"Google Chrome\";v=\"131\", \"Chromium\";v=\"131\", \"Not_A Brand\";v=\"24\"",
        "sec-ch-ua-bitness": "\"64\"",
        "sec-ch-ua-model": "\"\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-arch": "\"x86\"",
        "sec-ch-ua-full-version": "\"131.0.6778.266\"",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
        "Content-Type": "text/plain;charset=UTF-8",
        "sec-ch-ua-platform-version": "\"19.0.0\""
    }
    try:
        response = requests.post(url, json=payload, headers=headers, timeout=30)
        response.raise_for_status()
        response_json = response.json()
    except (requests.RequestException, ValueError) as exc:
        # ValueError covers .json() failing on a non-JSON body.
        raise HTTPException(status_code=502, detail=f"Upstream request failed: {exc}") from exc
    return response_json.get("markdown", "")


@app.post("/v1/chat/completions")
async def chat_completions(chat_request: ChatRequest):
    """OpenAI-compatible chat-completions endpoint backed by blackbox.ai.

    Forwards the latest user message upstream and wraps the reply in an
    OpenAI `chat.completion` response object.
    """
    user_prompt = _extract_user_prompt(chat_request.messages)

    # `requests` is blocking; run it in the default thread-pool executor so
    # the 30-second upstream wait does not stall the asyncio event loop.
    loop = asyncio.get_running_loop()
    markdown_result = await loop.run_in_executor(None, _call_blackbox, user_prompt)

    cleaned_full_response = markdown_result.strip()

    # Very simplistic whitespace-based token counting.
    prompt_tokens = len(user_prompt.split())
    completion_tokens = len(cleaned_full_response.split())

    return {
        # "chatcmpl-" prefix matches the id format OpenAI clients expect.
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": chat_request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": prompt_tokens + completion_tokens,
        },
    }