Spaces:
Running
Running
Upload 3 files
Browse files
- Dockerfile +29 -0
- api.py +79 -0
- requirements.txt +3 -0
Dockerfile
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Image for the EmoRoBERTa FastAPI service (Hugging Face Space, port 7860).
# python:3.8.18 matches the pinned fastapi/transformers versions in requirements.txt.
FROM python:3.8.18

WORKDIR /

# Copy requirements.txt to the container first so the dependency layer is
# cached independently of application-code changes
COPY requirements.txt ./

# Install Python dependencies (no pip cache — keeps the image smaller)
RUN pip install --no-cache-dir -r requirements.txt

# Add a non-root user to run the application (uid 1000 is the Spaces convention)
RUN useradd -m -u 1000 user

# Set the user and home directory environment variables; ~/.local/bin goes on
# PATH so user-level installed entry points (e.g. uvicorn) are found
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# Create the application directory
WORKDIR $HOME/app

# Copy the application code and model files, owned by the non-root user
COPY --chown=user . $HOME/app/

# Expose the port the FastAPI app runs on (7860 is the Spaces default)
EXPOSE 7860

# Command to run the FastAPI app
CMD ["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "7860"]
api.py
ADDED
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Built-in
from pathlib import Path
import uvicorn
import re

# Dependencies for FastAPI
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel

from transformers import RobertaTokenizerFast, TFRobertaForSequenceClassification, pipeline


# Request body: a single free-text paragraph to classify.
class Paragraph(BaseModel):
    input: str


# Load the EmoRoBERTa model once at startup.
tokenizer = RobertaTokenizerFast.from_pretrained("arpanghoshal/EmoRoBERTa")
model = TFRobertaForSequenceClassification.from_pretrained("arpanghoshal/EmoRoBERTa")
# Reuse the already-loaded model and tokenizer instead of passing the model id
# string, which made the pipeline download/instantiate a SECOND copy of the
# model while `tokenizer`/`model` above sat unused.
# return_all_scores=True: every request gets scores for all emotion labels,
# not just the top one.
emotion = pipeline('sentiment-analysis', model=model, tokenizer=tokenizer, return_all_scores=True)

# Start the app
app = FastAPI()

# Setup CORS policy — wide open, as expected for a public demo deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
34 |
+
|
35 |
+
def remove_unknown_symbols(text):
|
36 |
+
# Define a regular expression pattern to match characters that are not within the range of alphanumeric, space, and common punctuation characters
|
37 |
+
pattern = re.compile(r'[^A-Za-z0-9\s.,?!\'"-]')
|
38 |
+
# Replace unknown symbols with an empty string
|
39 |
+
cleaned_text = re.sub(pattern, '', text)
|
40 |
+
# Truncate the text if its length exceeds 1020 characters
|
41 |
+
return cleaned_text[:1020]
|
42 |
+
|
43 |
+
# APIs
|
44 |
+
@app.get("/")
|
45 |
+
async def docs():
|
46 |
+
return RedirectResponse(url="/docs")
|
47 |
+
|
48 |
+
@app.post("/emoroberta")
|
49 |
+
async def predict_emotions_emoroberta(paragraph : Paragraph):
|
50 |
+
# Split the huge chunk of text into a list of strings
|
51 |
+
text_list = [text.strip() for text in re.split(r'[.!?;\n]', paragraph.input) if text.strip()]
|
52 |
+
|
53 |
+
# Create a list to store predictions per text
|
54 |
+
predictions_per_text = []
|
55 |
+
for text in text_list:
|
56 |
+
cleaned_text = remove_unknown_symbols(text)
|
57 |
+
emotions = emotion(cleaned_text)[0]
|
58 |
+
predictions_per_text.append(emotions)
|
59 |
+
|
60 |
+
# Create a dictionary to aggregate scores for each label
|
61 |
+
total = {}
|
62 |
+
|
63 |
+
# Iterate over each list and aggregate the scores
|
64 |
+
for prediction in predictions_per_text:
|
65 |
+
for emotion_dict in prediction:
|
66 |
+
label = emotion_dict['label']
|
67 |
+
score = emotion_dict['score']
|
68 |
+
total[label] = total.get(label, 0) + score
|
69 |
+
|
70 |
+
# Convert the dictionary to a list of dictionaries
|
71 |
+
result = [{"label": label, "score": score} for label, score in total.items()]
|
72 |
+
|
73 |
+
# Sort the result in descending order based on score
|
74 |
+
sorted_result = sorted(result, key=lambda x: x['score'], reverse=True)
|
75 |
+
|
76 |
+
return {"predictions": sorted_result}
|
77 |
+
|
78 |
+
# if __name__ == "__main__":
|
79 |
+
# uvicorn.run("api:app", host="0.0.0.0", port=8000, reload=True)
|
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
fastapi==0.99.1
uvicorn==0.27.1
transformers==4.38.2
# api.py uses TFRobertaForSequenceClassification, which needs a TensorFlow
# backend that transformers does not install on its own — without this the
# container builds but fails at import time.
tensorflow==2.15.0