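"""FastAPI router exposing FLUX.1-schnell image generation and
Mixtral-8x7B chat completion, with per-user token accounting in MongoDB."""
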
import io
import os

import requests
from dotenv import load_dotenv
from fastapi import APIRouter
from fastapi.concurrency import run_in_threadpool
from fastapi.responses import StreamingResponse
from huggingface_hub import InferenceClient
from PIL import Image, ImageEnhance
from pydantic import BaseModel
from pymongo import MongoClient

from models import SuccessResponse


class FluxAI(BaseModel):
    user_id: int
    args: str
    auto_enhancer: bool = False


class MistralAI(BaseModel):
    args: str


router = APIRouter()

# MONGO_URL and HUGGING_TOKEN must be set in the environment or a .env file;
# os.environ[...] raises KeyError if either is missing.
load_dotenv()
MONGO_URL = os.environ["MONGO_URL"]
HUGGING_TOKEN = os.environ["HUGGING_TOKEN"]

client_mongo = MongoClient(MONGO_URL)
db = client_mongo["tiktokbot"]
collection = db["users"]


async def schellwithflux(args):
    """Generate an image with FLUX.1-schnell via the HF Inference API.

    Returns the raw image bytes, or None on a non-200 response.
    """
    API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
    headers = {"Authorization": f"Bearer {HUGGING_TOKEN}"}
    payload = {"inputs": args}
    # requests.post is blocking, so run it in a worker thread to keep the
    # event loop free; the timeout guards against hung requests.
    response = await run_in_threadpool(
        requests.post, API_URL, headers=headers, json=payload, timeout=120
    )
    if response.status_code != 200:
        print(f"Error status {response.status_code}")
        return None
    return response.content


async def mistralai_post_message(message_str):
    """Collect a streamed Mixtral-8x7B chat completion into one string."""
    client = InferenceClient(
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        token=HUGGING_TOKEN
    )

    def _collect():
        output = ""
        for message in client.chat_completion(
            messages=[{"role": "user", "content": message_str}],
            max_tokens=500,
            stream=True
        ):
            # The final streamed chunk can carry a None delta; guard against
            # concatenating None into the output.
            output += message.choices[0].delta.content or ""
        return output

    # The sync client blocks while streaming, so keep it off the event loop.
    return await run_in_threadpool(_collect)


def get_user_tokens_gpt(user_id):
    """Return the user's remaining token balance, or 0 for unknown users."""
    user = collection.find_one({"user_id": user_id})
    if not user:
        return 0
    return user.get("tokens", 0)


def deduct_tokens_gpt(user_id, amount):
    """Atomically deduct `amount` tokens; return True if the balance sufficed.

    The balance check and the decrement happen in one conditional update so
    concurrent requests cannot drive a balance negative.
    """
    result = collection.update_one(
        {"user_id": user_id, "tokens": {"$gte": amount}},
        {"$inc": {"tokens": -amount}}
    )
    return result.modified_count > 0
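

# Illustrative shape of a document in the `users` collection (field names
# inferred from the queries above):
#     {"user_id": 123456789, "tokens": 100}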


@router.post("/akeno/mistralai", response_model=SuccessResponse, responses={422: {"model": SuccessResponse}})
async def mistralai_(payload: MistralAI):
    """Chat endpoint backed by Mixtral-8x7B-Instruct-v0.1."""
    try:
        response = await mistralai_post_message(payload.args)
        return SuccessResponse(
            status="True",
            randydev={"message": response}
        )
    except Exception as e:
        return SuccessResponse(
            status="False",
            randydev={"error": f"An error occurred: {str(e)}"}
        )


@router.post("/akeno/fluxai", response_model=SuccessResponse, responses={422: {"model": SuccessResponse}})
async def fluxai_image(payload: FluxAI):
    """Image-generation endpoint backed by FLUX.1-schnell.

    Each request costs 20 tokens, deducted up front; the charge is not
    refunded if generation fails.
    """
    if not deduct_tokens_gpt(payload.user_id, amount=20):
        tokens = get_user_tokens_gpt(payload.user_id)
        return SuccessResponse(
            status="False",
            randydev={"error": f"Not enough tokens. Current tokens: {tokens}. Please support @xtdevs"}
        )
    try:
        image_bytes = await schellwithflux(payload.args)
        if image_bytes is None:
            return SuccessResponse(
                status="False",
                randydev={"error": "Failed to generate an image"}
            )
        if payload.auto_enhancer:
            with Image.open(io.BytesIO(image_bytes)) as image:
                # JPEG has no alpha channel; normalize the mode first.
                image = image.convert("RGB")
                # Mild sharpen/contrast/saturation pass.
                image = ImageEnhance.Sharpness(image).enhance(1.5)
                image = ImageEnhance.Contrast(image).enhance(1.2)
                image = ImageEnhance.Color(image).enhance(1.1)
                enhanced_image_bytes = io.BytesIO()
                image.save(enhanced_image_bytes, format="JPEG", quality=95)
                enhanced_image_bytes.seek(0)
            return StreamingResponse(enhanced_image_bytes, media_type="image/jpeg")
        return StreamingResponse(io.BytesIO(image_bytes), media_type="image/jpeg")
    except Exception as e:
        return SuccessResponse(
            status="False",
            randydev={"error": f"An error occurred: {str(e)}"}
        )
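

# Example wiring (illustrative; the module name is an assumption):
#
#     from fastapi import FastAPI
#     import routes  # this module
#
#     app = FastAPI()
#     app.include_router(routes.router)
#
# The endpoints are then reachable at POST /akeno/mistralai and
# POST /akeno/fluxai.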