adding app files
- Dockerfile +17 -0
- app.py +47 -0
- requirements.txt +7 -0
Dockerfile
ADDED
@@ -0,0 +1,17 @@
+# Use an official Python runtime as a parent image (3.10+ is needed for the `str | os.PathLike` annotation in app.py)
+FROM python:3.10-slim
+
+# Set the working directory in the container
+WORKDIR /code
+
+# Copy the requirements.txt file into the container at /code
+COPY requirements.txt .
+
+# Install any dependencies specified in requirements.txt
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy the current directory contents into the container at /code
+COPY . .
+
+# Command to run the FastAPI app using Uvicorn
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
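For local testing, a minimal sketch of building and running this image (the tag florence-plate-api and the host-side port mapping are illustrative, not part of this commit):

docker build -t florence-plate-api .
docker run -p 7860:7860 florence-plate-api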
app.py
ADDED
@@ -0,0 +1,47 @@
+from fastapi import FastAPI, UploadFile, File
+from transformers import AutoProcessor, AutoModelForCausalLM
+from PIL import Image
+import torch
+import io
+import os
+
+# Patch to remove flash-attn dependency
+from transformers.dynamic_module_utils import get_imports
+def fixed_get_imports(filename: str | os.PathLike) -> list[str]:
+    """Work around for flash-attn imports."""
+    if not str(filename).endswith("/modeling_florence2.py"):
+        return get_imports(filename)
+    imports = get_imports(filename)
+    if "flash_attn" in imports:
+        imports.remove("flash_attn")
+    return imports
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+# Apply the patch
+from unittest.mock import patch
+with patch("transformers.dynamic_module_utils.get_imports", fixed_get_imports):
+    model = AutoModelForCausalLM.from_pretrained("numberPlate_model_1", trust_remote_code=True).to(device)
+    processor = AutoProcessor.from_pretrained("numberPlate_model_1", trust_remote_code=True)
+
+# Initialize FastAPI
+app = FastAPI()
+
+
+def process_image(image, task_token):
+    inputs = processor(text=task_token, images=image, return_tensors="pt", padding=True).to(device)
+    generated_ids = model.generate(
+        input_ids=inputs["input_ids"],
+        pixel_values=inputs["pixel_values"],
+        max_new_tokens=1024,
+        num_beams=3,
+        do_sample=False
+    )
+    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
+    parsed_result = processor.post_process_generation(generated_text, task=task_token, image_size=(image.width, image.height))
+    return parsed_result
+
+
+@app.post("/process-image/")
+async def process_image_endpoint(file: UploadFile = File(...), task_token: str = "<OD>"):
+    image = Image.open(io.BytesIO(await file.read())).convert("RGB")
+    result = process_image(image, task_token)
+    return result
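Once the server is up, the endpoint can be exercised with a multipart file upload. A hedged example (plate.jpg is a placeholder image name; task_token is sent URL-encoded as a query parameter, since FastAPI treats a plain string parameter with a default as a query parameter):

curl -X POST "http://localhost:7860/process-image/?task_token=%3COD%3E" -F "file=@plate.jpg"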
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+fastapi
+uvicorn
+transformers
+pillow
+einops
+timm
+torch
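To run the app outside Docker under the same assumptions (Python 3.10+, and numberPlate_model_1 resolvable by from_pretrained), a minimal sketch:

pip install -r requirements.txt
uvicorn app:app --host 0.0.0.0 --port 7860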