Update app.py
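This commit removes the DeepInfra provider from the backend end to end: its PROVIDERS entry, the three DeepInfra models (llama4-maverick, qwen3-coder, deepseek-r1), call_deepinfra_api, the generate_deepinfra streaming generator, the dispatch branches in call_api and chat_stream, the health and provider listings, and the startup log line. Groq and LLM7.io remain.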
app.py CHANGED
@@ -1,15 +1,14 @@
 """
 Sixfinger Backend API - FRONTEND UYUMLU VERSİYON
 Ultra-fast AI Chat Backend with Multi-Model Support
-Supports: Groq, DeepInfra, LLM7.io
+Supports: Groq, LLM7.io
 """

 import os
 import time
 import json
 import logging
-import requests
-from typing import Optional, Dict, Any, Generator
+from typing import Optional, Dict, Any
 from datetime import datetime

 from fastapi import FastAPI, HTTPException, Header, Request
@@ -30,12 +29,6 @@ PROVIDERS = {
         "type": "groq",
         "requires_key": True
     },
-    "deepinfra": {
-        "name": "DeepInfra",
-        "type": "deepinfra",
-        "base_url": "https://api.deepinfra.com/v1/openai/chat/completions",
-        "requires_key": False
-    },
     "llm7": {
         "name": "LLM7.io",
         "type": "openai_compatible",
@@ -73,41 +66,6 @@ MODELS = {
         "daily_limit": 300
     },

-    # DeepInfra Models (Free)
-    "llama4-maverick": {
-        "provider": "deepinfra",
-        "model_id": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-Turbo",
-        "display_name": "Llama 4 Maverick 17B",
-        "size": "17B",
-        "language": "Multilingual",
-        "speed": "⚡⚡",
-        "description": "Meta'nın en yeni hızlı ve yetenekli modeli",
-        "plans": ["free", "starter", "pro", "plus"],
-        "daily_limit": 1000
-    },
-    "qwen3-coder": {
-        "provider": "deepinfra",
-        "model_id": "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo",
-        "display_name": "Qwen3 Coder 480B",
-        "size": "480B",
-        "language": "Multilingual",
-        "speed": "⚡",
-        "description": "Kod yazma uzmanı dev model",
-        "plans": ["free", "starter", "pro", "plus"],
-        "daily_limit": 500
-    },
-    "deepseek-r1": {
-        "provider": "deepinfra",
-        "model_id": "deepseek-ai/DeepSeek-R1-0528-Turbo",
-        "display_name": "DeepSeek R1 Turbo",
-        "size": "Unknown",
-        "language": "Multilingual",
-        "speed": "⚡",
-        "description": "Muhakeme ve zeka odaklı model",
-        "plans": ["free", "starter", "pro", "plus"],
-        "daily_limit": 500
-    },
-
     # ============ STARTER PLAN MODELS ============

     # LLM7.io Models (Starter+)
@@ -331,41 +289,6 @@ def call_groq_api(
         logger.error(f"Groq API error: {e}")
         raise HTTPException(status_code=500, detail=f"Groq API error: {str(e)}")

-def call_deepinfra_api(
-    model_id: str,
-    messages: list,
-    max_tokens: int,
-    temperature: float,
-    top_p: float,
-    stream: bool = False
-) -> Dict[str, Any]:
-    """DeepInfra API'ye istek at (non-streaming)"""
-    url = PROVIDERS["deepinfra"]["base_url"]
-    headers = {
-        "Content-Type": "application/json",
-        "X-Deepinfra-Source": "web-page"
-    }
-
-    data = {
-        "model": model_id,
-        "messages": messages,
-        "max_tokens": max_tokens,
-        "temperature": temperature,
-        "top_p": top_p,
-        "stream": stream
-    }
-
-    try:
-        if stream:
-            return requests.post(url, headers=headers, json=data, stream=True)
-        else:
-            response = requests.post(url, headers=headers, json=data)
-            response.raise_for_status()
-            return response.json()
-    except Exception as e:
-        logger.error(f"DeepInfra API error: {e}")
-        raise HTTPException(status_code=500, detail=f"DeepInfra API error: {str(e)}")
-
 def call_llm7_api(
     model_id: str,
     messages: list,
@@ -401,8 +324,6 @@ def call_api(
     """Universal API caller - provider'a göre yönlendir"""
     if provider == "groq":
         return call_groq_api(model_id, messages, max_tokens, temperature, top_p, stream)
-    elif provider == "deepinfra":
-        return call_deepinfra_api(model_id, messages, max_tokens, temperature, top_p, stream)
     elif provider == "llm7":
         return call_llm7_api(model_id, messages, max_tokens, temperature, top_p, stream)
     else:
@@ -419,7 +340,6 @@ def health_check():
         "timestamp": datetime.now().isoformat(),
         "providers": {
             "groq": bool(GROQ_API_KEY),
-            "deepinfra": True,
             "llm7": True
         }
     }
@@ -452,44 +372,22 @@ def chat(
         )

     try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                "prompt_tokens": usage.get("prompt_tokens", 0),
-                "completion_tokens": usage.get("completion_tokens", 0),
-                "total_tokens": usage.get("total_tokens", 0)
-            }
-        else:
-            # Groq veya LLM7 response
-            response = call_api(
-                provider=provider,
-                model_id=model_id,
-                messages=messages,
-                max_tokens=request.max_tokens,
-                temperature=request.temperature,
-                top_p=request.top_p,
-                stream=False
-            )
-
-            content = response.choices[0].message.content
-            usage = {
-                "prompt_tokens": getattr(response.usage, 'prompt_tokens', 0),
-                "completion_tokens": getattr(response.usage, 'completion_tokens', 0),
-                "total_tokens": getattr(response.usage, 'total_tokens', 0)
-            }
+        response = call_api(
+            provider=provider,
+            model_id=model_id,
+            messages=messages,
+            max_tokens=request.max_tokens,
+            temperature=request.temperature,
+            top_p=request.top_p,
+            stream=False
+        )
+
+        content = response.choices[0].message.content
+        usage = {
+            "prompt_tokens": getattr(response.usage, 'prompt_tokens', 0),
+            "completion_tokens": getattr(response.usage, 'completion_tokens', 0),
+            "total_tokens": getattr(response.usage, 'total_tokens', 0)
+        }

     elapsed = time.time() - start_time
     logger.info(f"Chat completed: provider={provider}, tokens={usage['total_tokens']}, time={elapsed:.2f}s")
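(Note on the hunk above: the removed branch handled DeepInfra's raw JSON response as a plain dict, hence usage.get(...), while the surviving path reads SDK-style objects via getattr(response.usage, ...). With only one response shape left, the handler collapses to a single call_api call. The sixteen blank removed lines reflect content the diff viewer did not render.)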
@@ -574,52 +472,6 @@ def chat_stream(
             logger.error(f"Groq stream error: {e}")
             yield f"data: {json.dumps({'error': str(e)})}\n\n"

-    def generate_deepinfra():
-        """DeepInfra streaming generator"""
-        try:
-            yield f"data: {json.dumps({'info': f'Using {model_key} via DeepInfra'})}\n\n"
-
-            url = PROVIDERS["deepinfra"]["base_url"]
-            headers = {
-                "Content-Type": "application/json",
-                "X-Deepinfra-Source": "web-page"
-            }
-            data = {
-                "model": model_id,
-                "messages": messages,
-                "max_tokens": request.max_tokens,
-                "temperature": request.temperature,
-                "top_p": request.top_p,
-                "stream": True
-            }
-
-            response = requests.post(url, headers=headers, json=data, stream=True)
-
-            total_completion_tokens = 0
-
-            for line in response.iter_lines():
-                if line:
-                    decoded = line.decode('utf-8')
-                    if decoded.startswith("data: "):
-                        content = decoded[6:]
-                        if content == "[DONE]":
-                            break
-                        try:
-                            json_data = json.loads(content)
-                            delta = json_data.get("choices", [{}])[0].get("delta", {})
-                            if "content" in delta:
-                                token = delta["content"]
-                                yield f"data: {json.dumps({'text': token})}\n\n"
-                                total_completion_tokens += 1
-                        except json.JSONDecodeError:
-                            continue
-
-            yield f"data: {json.dumps({'done': True, 'model_key': model_key, 'provider': 'deepinfra', 'attempts': 1, 'usage': {'prompt_tokens': 0, 'completion_tokens': total_completion_tokens, 'total_tokens': total_completion_tokens}})}\n\n"
-
-        except Exception as e:
-            logger.error(f"DeepInfra stream error: {e}")
-            yield f"data: {json.dumps({'error': str(e)})}\n\n"
-
     def generate_llm7():
         """LLM7.io streaming generator"""
         try:
@@ -651,8 +503,6 @@ def chat_stream(
     # Provider'a göre generator seç
     if provider == "groq":
         generator = generate_groq()
-    elif provider == "deepinfra":
-        generator = generate_deepinfra()
     elif provider == "llm7":
         generator = generate_llm7()
     else:
@@ -717,12 +567,6 @@ def list_providers():
             "status": "active" if GROQ_API_KEY else "inactive",
             "description": "Ultra-fast inference with Groq LPU"
         },
-        {
-            "id": "deepinfra",
-            "name": "DeepInfra",
-            "status": "active",
-            "description": "Free tier AI models - Llama 4, Qwen3 Coder, DeepSeek"
-        },
         {
             "id": "llm7",
             "name": "LLM7.io",
@@ -761,7 +605,6 @@ async def startup_event():
     logger.info("🚀 Sixfinger Backend API started")
     logger.info(f"📦 Version: {API_VERSION}")
     logger.info(f"🔑 Groq API: {'✅ Configured' if GROQ_API_KEY else '❌ Not configured'}")
-    logger.info(f"🌐 DeepInfra: ✅ Active (Free tier)")
     logger.info(f"🌐 LLM7.io: ✅ Active (Free tier)")
     logger.info(f"🤖 Total Models: {len(MODELS)}")
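For a quick smoke test of this change, a minimal sketch along the following lines can confirm a running instance no longer advertises DeepInfra. The /health route path, the port, and the exact response shape are assumptions read off the health_check handler above; the route decorators are not visible in this diff.

import requests  # assumes the requests library is available in the test environment

# Hypothetical base URL; Hugging Face Spaces apps commonly listen on port 7860.
BASE_URL = "http://localhost:7860"

# Assumption: health_check() is mounted at /health and returns the dict shown above.
health = requests.get(f"{BASE_URL}/health", timeout=10).json()

# After this commit the providers map should contain only groq and llm7.
assert "deepinfra" not in health["providers"]
assert set(health["providers"]) == {"groq", "llm7"}
print("providers:", health["providers"])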