added 2 new routes and added support for images
- .DS_Store +0 -0
- __pycache__/app.cpython-312.pyc +0 -0
- app.py +91 -7
- scrape/__pycache__/main.cpython-312.pyc +0 -0
- test.py +29 -18
- test2.py +27 -0
.DS_Store
CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
__pycache__/app.cpython-312.pyc
CHANGED
Binary files a/__pycache__/app.cpython-312.pyc and b/__pycache__/app.cpython-312.pyc differ
app.py
CHANGED
@@ -163,7 +163,7 @@ async def groqgenerate(json_data: Dict[str, Any]):
         # Create streaming response
         stream = client.chat.completions.create(
             messages=messages,
-            model="meta-llama/llama-4-scout-17b-16e-instruct",
+            model=json_data.get("model", "meta-llama/llama-4-scout-17b-16e-instruct"),
             temperature=json_data.get("temperature", 0.7),
             max_completion_tokens=json_data.get("max_tokens", 1024),
             top_p=json_data.get("top_p", 1),
@@ -181,7 +181,7 @@ async def groqgenerate(json_data: Dict[str, Any]):
                 "id": chunk_id,
                 "object": "chat.completion.chunk",
                 "created": created,
-                "model": json_data.get("model", "llama-
+                "model": json_data.get("model", "meta-llama/llama-4-scout-17b-16e-instruct"),
                 "choices": [{
                     "index": 0,
                     "text": content,
@@ -197,7 +197,7 @@ async def groqgenerate(json_data: Dict[str, Any]):
                 "id": chunk_id,
                 "object": "chat.completion.chunk",
                 "created": created,
-                "model": json_data.get("model", "llama-
+                "model": json_data.get("model", "meta-llama/llama-4-scout-17b-16e-instruct"),
                 "choices": [],
                 "usage": {
                     "prompt_tokens": len(messages),
@@ -230,9 +230,11 @@ async def vercelXaigenerate(json_data: Dict[str, Any]):
     request_data = {
         "id": "".join(random.choices("0123456789abcdef", k=16)),
         "messages": messages,
-        "selectedModel": "grok-2-1212"
+        "selectedModel": json_data.get("model", "grok-2-1212"),
     }
 
+    print(request_data)
+
     chunk_id = "xai-" + "".join(random.choices("0123456789abcdef", k=32))
     created = int(asyncio.get_event_loop().time())
     total_tokens = 0
@@ -314,7 +316,7 @@ async def vercelGroqgenerate(json_data: Dict[str, Any]):
     request_data = {
         "id": "".join(random.choices("0123456789abcdef", k=16)),
         "messages": messages,
-        "selectedModel": "deepseek-r1-distill-llama-70b"
+        "selectedModel": json_data.get("model", "deepseek-r1-distill-llama-70b"),
    }
 
     chunk_id = "vercel-groq-" + "".join(random.choices("0123456789abcdef", k=32))
@@ -344,7 +346,7 @@ async def vercelGroqgenerate(json_data: Dict[str, Any]):
                 "id": chunk_id,
                 "object": "chat.completion.chunk",
                 "created": created,
-                "model": json_data.get("model", "
+                "model": json_data.get("model", "deepseek-r1-distill-llama-70b"),
                 "choices": [{
                     "index": 0,
                     "text": text,
@@ -360,7 +362,7 @@ async def vercelGroqgenerate(json_data: Dict[str, Any]):
                 "id": chunk_id,
                 "object": "chat.completion.chunk",
                 "created": created,
-                "model": json_data.get("model", "llama-
+                "model": json_data.get("model", "deepseek-r1-distill-llama-70b"),
                 "choices": [],
                 "usage": {
                     "prompt_tokens": len(messages),
@@ -438,3 +440,85 @@ async def scrape_md(request: Request):
 
     return {"markdown": data}
 
+@app.post("/v1/generate")
+async def api_generate(request: Request):
+    data = await request.json()
+    messages = data["messages"]
+    model = data["model"]
+    if not messages:
+        return {"error": "messages is required"}
+    elif not model:
+        return {"error": "Model is required"}
+
+    try:
+        json_data = {
+            'model': model,
+            'max_tokens': None,
+            'temperature': 0.7,
+            'top_p': 0.7,
+            'top_k': 50,
+            'repetition_penalty': 1,
+            'stream_tokens': True,
+            'stop': ['<|eot_id|>', '<|eom_id|>'],
+            'messages': messages,
+            'stream': True,
+        }
+
+        xaimodels = ["grok-3-mini", "grok-2-1212", "grok-3", "grok-3-fast", "grok-3-mini-fast"]
+
+        if model in xaimodels:
+            return StreamingResponse(vercelXaigenerate(json_data), media_type='text/event-stream')
+        else:
+            try:
+                return StreamingResponse(vercelGroqgenerate(json_data), media_type='text/event-stream')
+            except Exception as e:
+                try:
+                    return StreamingResponse(generate(json_data), media_type='text/event-stream')
+                except Exception as e:
+                    return StreamingResponse(groqgenerate(json_data), media_type='text/event-stream')
+    except Exception as e:
+        return {"error": str(e)}
+
+@app.post("/v1/generate-images")
+async def generate_images(request: Request):
+    data = await request.json()
+    prompt = data.get("prompt")
+    provider = data.get("provider")
+    modelId = data.get("modelId")
+
+    if not prompt:
+        return {"error": "Prompt is required"}
+    if not provider:
+        return {"error": "Provider is required"}
+    if not modelId:
+        return {"error": "Model ID is required"}
+
+    headers = {
+        'accept': '*/*',
+        'accept-language': 'en-US,en;q=0.9,ja;q=0.8',
+        'content-type': 'application/json',
+        'origin': 'https://fal-image-generator.vercel.app',
+        'priority': 'u=1, i',
+        'referer': 'https://fal-image-generator.vercel.app/',
+        'sec-ch-ua': '"Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+        'sec-fetch-dest': 'empty',
+        'sec-fetch-mode': 'cors',
+        'sec-fetch-site': 'same-origin',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36',
+    }
+
+    json_data = {
+        'prompt': prompt,
+        'provider': 'fal',
+        'modelId': 'fal-ai/fast-sdxl',
+    }
+
+    async with httpx.AsyncClient() as client:
+        response = await client.post(
+            'https://fal-image-generator.vercel.app/api/generate-images',
+            headers=headers,
+            json=json_data
+        )
+    return response.json()
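A note on the dispatch in the new api_generate: constructing a StreamingResponse only wraps the async generator, it does not run it, so the nested try/except blocks above will not catch a provider that fails once streaming begins. A minimal sketch of one way to make the fallback effective, assuming the provider functions are async generators as in this diff (first_chunk_then_stream is a hypothetical helper, not part of the commit):

    # Hypothetical helper: await the first chunk eagerly so a provider that
    # fails immediately raises here, while the caller can still fall back.
    async def first_chunk_then_stream(gen):
        first = await anext(gen)  # raises inside the caller's try, not mid-response

        async def replay():
            yield first              # re-emit the chunk we consumed
            async for chunk in gen:  # pass the rest through unchanged
                yield chunk

        return replay()

The caller would await first_chunk_then_stream(vercelGroqgenerate(json_data)) inside the try, fall back to the next provider on exception, and hand the returned generator to StreamingResponse.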
scrape/__pycache__/main.cpython-312.pyc
CHANGED
Binary files a/scrape/__pycache__/main.cpython-312.pyc and b/scrape/__pycache__/main.cpython-312.pyc differ
test.py
CHANGED
@@ -1,22 +1,33 @@
-
-
-import html2text
+import requests
+import json
 
-
+messages = [
+    {"role": "user", "content": "helo"},
+    {"role": "assistant", "content": "Hello! How can I assist you today?"},
+    {"role": "user", "content": "who are you and give me a breif description of who you are"}
+]
 
-
-
-
+model = "grok-3"
+url = "http://127.0.0.1:8000/v1/generate"
+payload = {
+    "messages": messages,
+    "model": model
+}
 
-
-for tag in soup(['script', 'style', 'noscript', 'svg']):
-    tag.decompose()
+response = requests.post(url, json=payload, stream=True)
 
-
-
-
-
-
-
-#
-
+if response.status_code == 200:
+    for line in response.iter_lines():
+        if line:
+            decoded_line = line.decode('utf-8')
+            if decoded_line.startswith('data: '):
+                try:
+                    # Remove 'data: ' prefix and parse JSON
+                    json_data = json.loads(decoded_line[6:])
+                    # Check if there are choices and text
+                    if json_data["choices"] and "text" in json_data["choices"][0]:
+                        print(json_data["choices"][0]["text"], end='')
+                except json.JSONDecodeError:
+                    continue
+else:
+    print(f"Request failed with status code {response.status_code}")
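Per the app.py hunks above, the server's final chunk carries a usage object alongside empty choices, which the loop above silently skips. A small sketch that surfaces it as well (handle_chunk is a hypothetical name, not part of the commit):

    def handle_chunk(json_data: dict) -> None:
        # Content chunks carry choices[0]["text"]; the final chunk has
        # "choices": [] plus a "usage" object (see the app.py diff above).
        if json_data.get("choices") and "text" in json_data["choices"][0]:
            print(json_data["choices"][0]["text"], end='')
        elif "usage" in json_data:
            print("\nusage:", json_data["usage"])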
test2.py
ADDED
@@ -0,0 +1,27 @@
+import requests
+
+headers = {
+    'accept': '*/*',
+    'accept-language': 'en-US,en;q=0.9,ja;q=0.8',
+    'content-type': 'application/json',
+    'origin': 'https://fal-image-generator.vercel.app',
+    'priority': 'u=1, i',
+    'referer': 'https://fal-image-generator.vercel.app/',
+    'sec-ch-ua': '"Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
+    'sec-ch-ua-mobile': '?0',
+    'sec-ch-ua-platform': '"macOS"',
+    'sec-fetch-dest': 'empty',
+    'sec-fetch-mode': 'cors',
+    'sec-fetch-site': 'same-origin',
+    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36',
+}
+
+json_data = {
+    'prompt': 'A frog meditating on a lotus leaf in a tranquil forest pond at dawn, surrounded by fireflies, in the style of anime',
+    'provider': 'fal',
+    'modelId': 'fal-ai/fast-sdxl',
+}
+
+response = requests.post('http://127.0.0.1:8000/v1/generate-images', headers=headers, json=json_data)
+
+print(response.json())
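Note that test2.py sends provider and modelId, but the new /v1/generate-images route validates those fields and then hardcodes 'fal' and 'fal-ai/fast-sdxl' in the upstream payload, so the request values currently have no effect. A sketch of passing them through instead, assuming pass-through is the intended behavior (not part of the commit):

    # Hypothetical pass-through: build the upstream payload from the
    # validated request fields instead of the hardcoded constants.
    def build_payload(prompt: str, provider: str, modelId: str) -> dict:
        return {
            'prompt': prompt,
            'provider': provider,  # committed code hardcodes 'fal'
            'modelId': modelId,    # committed code hardcodes 'fal-ai/fast-sdxl'
        }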