from fastapi import FastAPI, UploadFile, File, HTTPException
from transformers import pipeline, AutoTokenizer
from fastai.vision.all import *
from PIL import Image
import torch
import os
import io
access_token = os.getenv("HF_TOKEN")

# NOTE - we configure docs_url to serve the interactive Docs at the root path
# of the app. This way, we can use the docs as a landing page for the app on Spaces.
app = FastAPI(docs_url="/")
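
# `learn` and `categories` are used by face_analyse below but were never
# defined. A minimal sketch, assuming the image classifier was exported with
# fastai's learn.export() to "model.pkl" (the file name is a placeholder):
learn = load_learner("model.pkl")
# Class labels, in the order the learner reports probabilities
categories = learn.dls.vocab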

# The /face_analyse route path is an assumption; adjust it to your setup.
@app.post("/face_analyse")
async def face_analyse(file: UploadFile = File(...)):
    # Read the uploaded file content
    request_object_content = await file.read()

    try:
        # Attempt to open the image
        img = Image.open(io.BytesIO(request_object_content))
    except Exception:
        return {"error": "Failed to open the image file. Make sure it is a valid image file."}

    # Check if img is None or not
    if img is None:
        return {"error": "Failed to open the image file."}

    try:
        # Resize the image to 300x300 pixels
        img = img.resize((300, 300))
    except Exception:
        return {"error": "Failed to resize the image."}

    try:
        # Run the fastai image classifier on the uploaded image
        pred, idx, probs = learn.predict(img)
    except Exception:
        return {"error": "Failed to make predictions."}

    # Map each category label to its predicted probability
    return dict(zip(categories, map(float, probs)))
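
# Example call with curl (the /face_analyse path matches the assumed route
# above; face.jpg is a placeholder):
# curl -X 'POST' 'http://localhost:8000/face_analyse' \
#   -H 'accept: application/json' \
#   -F 'file=@face.jpg'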

# Initialize the model and the tokenizer
model = "meta-llama/CodeLlama-7b-hf"
tokenizer = AutoTokenizer.from_pretrained(model, token=access_token)

llama_pipeline = pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
    token=access_token,  # the model is gated, so authenticate here as well
)

@app.get("/generate_code")
def generate_code(text: str):
    """
    Using the Code Llama pipeline from `transformers`, generate code
    from the given input text. The model used is `meta-llama/CodeLlama-7b-hf`.
    """
    try:
        sequences = llama_pipeline(
            text,
            do_sample=True,
            top_k=10,
            temperature=0.1,
            top_p=0.95,
            num_return_sequences=1,
            eos_token_id=tokenizer.eos_token_id,
            max_length=200,
        )
        generated_text = sequences[0]["generated_text"]
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

    return {"generated_text": generated_text}

# Example call with curl:
# curl -X 'GET' \
#   'http://localhost:8000/generate_code?text=import%20socket%0A%0Adef%20ping_exponential_backoff(host%3A%20str)%3A' \
#   -H 'accept: application/json'
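
# To run the app locally (a sketch, assuming this file is app.py; on Spaces
# the container typically starts uvicorn for you):
# uvicorn app:app --host 0.0.0.0 --port 8000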