Spaces:
Runtime error
requirements.txt changed
- Dockerfile +27 -14
- app.py +67 -0
- requirements.txt +0 -0
Dockerfile
CHANGED
@@ -1,20 +1,33 @@
-# Use the official Python image
-FROM python:3.9-slim
-
-# Set the working directory in the container
-WORKDIR /app
-
-# Copy the dependencies file to the working directory
-COPY requirements.txt .
-
-# Install any dependencies
-RUN pip install --no-cache-dir -r requirements.txt
-
-# Copy the content of the local src directory to the working directory
-COPY . .
-
-# Expose port 8000 to the outside world
-EXPOSE 8000
-
-# Command to run the FastAPI application
-CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]
+# # Use the official Python image
+# FROM python:3.9-slim
+
+# # Set the working directory in the container
+# WORKDIR /app
+
+# # Copy the dependencies file to the working directory
+# COPY requirements.txt .
+
+# # Install any dependencies
+# RUN pip install --no-cache-dir -r requirements.txt
+
+# # Copy the content of the local src directory to the working directory
+# COPY . .
+
+# # Expose port 8000 to the outside world
+# EXPOSE 8000
+
+# # Command to run the FastAPI application
+# CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]
+
+
+FROM python:3.9
+
+WORKDIR /code
+
+COPY ./requirements.txt /code/requirements.txt
+
+RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+COPY . .
+
+CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]
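Note: the rewritten CMD targets app.main:app, which uvicorn resolves as the app object in app/main.py. This commit, however, adds app.py at the repository root, so the import string matching the actual layout would be app:app. That mismatch is a plausible cause of the "Runtime error" banner above. A hedged one-line fix, keeping port 7860 (the default port a Docker-based Hugging Face Space is expected to listen on):

CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]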
app.py
ADDED
@@ -0,0 +1,67 @@
+# from fastapi import FastAPI, File, UploadFile
+# from transformers import MarianMTModel, MarianTokenizer
+
+# app = FastAPI()
+
+# # Load the translation model and tokenizer
+# model_name = "Helsinki-NLP/opus-mt-de-en"
+# model = MarianMTModel.from_pretrained(model_name)
+# tokenizer = MarianTokenizer.from_pretrained(model_name)
+
+# @app.get("/")
+# def read_root():
+#     return {"message": "Welcome to the German to English Translation API!"}
+
+# @app.post("/translate/")
+# async def translate_text(text: str):
+#     # Perform translation
+#     input_text = f"translate German to English: {text}"
+
+#     # Tokenize input text
+#     input_ids = tokenizer.encode(input_text, return_tensors="pt")
+
+#     # Generate translation
+#     with torch.no_grad():
+#         output_ids = model.generate(input_ids)
+
+#     # Decode the output
+#     translated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
+
+#     return {"translated_text": translated_text}
+
+
+
+from fastapi import FastAPI
+from transformers import MarianMTModel, MarianTokenizer
+import torch
+
+app = FastAPI()
+
+# Load the translation model and tokenizer
+model_name = "Helsinki-NLP/opus-mt-de-en"
+model = MarianMTModel.from_pretrained(model_name)
+tokenizer = MarianTokenizer.from_pretrained(model_name)
+
+@app.get("/")
+def read_root():
+    return {"message": "Welcome to the German to English Translation API!"}
+
+@app.post("/translate/")
+async def translate_text(input_text: dict):
+    # Extract the input text from the JSON payload
+    text = input_text.get("text", "")
+
+    # Perform translation
+    input_text = f"translate German to English: {text}"
+
+    # Tokenize input text
+    input_ids = tokenizer.encode(input_text, return_tensors="pt")
+
+    # Generate translation
+    with torch.no_grad():
+        output_ids = model.generate(input_ids)
+
+    # Decode the output
+    translated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
+
+    return {"translated_text": translated_text}
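Two caveats on the new endpoint. First, the "translate German to English: " prefix is a T5-style task prompt; Marian opus-mt models such as Helsinki-NLP/opus-mt-de-en expect the raw source sentence, so the English prefix is itself fed through the de-en model and can corrupt the output. Second, reusing the name input_text for both the request body (a dict) and the prompt string works but reads poorly. A minimal sketch of the handler without the prefix, using the same model and tokenizer objects loaded above (the parameter name payload is illustrative):

@app.post("/translate/")
async def translate_text(payload: dict):
    # Extract the source text from the JSON body, e.g. {"text": "Guten Tag"}
    text = payload.get("text", "")

    # Marian translation models take the raw source sentence; no task prefix
    input_ids = tokenizer.encode(text, return_tensors="pt")

    # Generate and decode the translation
    with torch.no_grad():
        output_ids = model.generate(input_ids)
    translated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)

    return {"translated_text": translated_text}

Once the Space builds, the endpoint can be exercised with a POST request (URL shown for a local run on port 7860; substitute the Space's URL when deployed):

import requests

resp = requests.post(
    "http://localhost:7860/translate/",
    json={"text": "Guten Morgen, wie geht es dir?"},
)
print(resp.json())  # e.g. {"translated_text": "Good morning, how are you?"}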
requirements.txt
ADDED
Binary file (2.05 kB).
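The pinned contents aren't rendered here, but for the code above the file would need at least the packages the app imports, plus sentencepiece, which the Marian tokenizer depends on. A plausible minimal set (an assumption, not the actual committed file):

fastapi
uvicorn
transformers
torch
sentencepiece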