git.name committed
Commit · a04eea2 · 1 Parent(s): 99090d3
initial commit
Files changed:
- .gitattributes copy  +35 -0
- Dockerfile           +24 -0
- main.py              +47 -0
- requirements.txt     +7 -0
- static/index.html    +36 -0
- static/script.js     +35 -0
- static/style.css     +0 -0
.gitattributes copy
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
Dockerfile
ADDED
@@ -0,0 +1,24 @@
+FROM python:3.9
+
+WORKDIR /code
+
+COPY ./requirements.txt /code/requirements.txt
+
+RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+# Create a non-root user and switch to it
+RUN useradd -m -u 1000 user
+USER user
+
+# Set environment variables
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH \
+    HF_HOME=/home/user/.cache/huggingface/transformers
+
+WORKDIR $HOME/app
+
+# Ensure the user owns the copied files
+COPY --chown=user . $HOME/app
+
+# Command to run the application with Uvicorn
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
main.py
ADDED
@@ -0,0 +1,47 @@
+
+import torch
+from fastapi import FastAPI, Request
+from fastapi.responses import HTMLResponse
+from fastapi.staticfiles import StaticFiles
+from starlette.responses import FileResponse
+
+from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
+import torch.nn.functional as F
+
+app = FastAPI()
+
+model_name = "distilbert-base-uncased-finetuned-sst-2-english"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForSequenceClassification.from_pretrained(model_name)
+
+app.mount("/static", StaticFiles(directory="static", html=True), name="static")
+
+@app.get("/", response_class=HTMLResponse)
+async def read_index():
+    return FileResponse("static/index.html")
+
+@app.get("/classify_text")
+async def classify_text(input: str):
+    inputs = tokenizer(input, return_tensors="pt")
+    outputs = model(**inputs)
+
+    # Convert logits to probabilities
+    probabilities = F.softmax(outputs.logits, dim=-1)
+
+    # Assuming we're using a binary classification model here (positive, negative)
+    # Adjust indices [0, 1] based on your model's specific output mapping
+    positive_prob = probabilities[:, 1].item()  # Probability of positive sentiment
+    negative_prob = probabilities[:, 0].item()  # Probability of negative sentiment
+
+    # You can also use `torch.argmax` to just return the most likely class
+    sentiment = "positive" if torch.argmax(probabilities) == 1 else "negative"
+
+    return {
+        "input": input,
+        "positive_probability": positive_prob,
+        "negative_probability": negative_prob,
+        "sentiment": sentiment,
+    }
+
+
+# uvicorn main:app --host 127.0.0.1 --port 8001 --reload
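For reference, here is a minimal client-side sketch (not part of the commit) of how the /classify_text endpoint above could be exercised once the app is running. It assumes the local uvicorn command from the comment at the end of main.py and uses the requests package already pinned in requirements.txt.

import requests

# Assumes the dev server from the comment at the bottom of main.py:
#   uvicorn main:app --host 127.0.0.1 --port 8001 --reload
BASE_URL = "http://127.0.0.1:8001"

resp = requests.get(
    f"{BASE_URL}/classify_text",
    params={"input": "I am feeling lucky today"},
)
resp.raise_for_status()

data = resp.json()
# The keys mirror the dict returned by classify_text in main.py
print(data["sentiment"])
print(data["positive_probability"], data["negative_probability"])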
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+fastapi==0.74.*
+requests==2.27.*
+sentencepiece==0.1.*
+torch==1.11.*
+transformers==4.*
+uvicorn[standard]==0.17.*
+
static/index.html
ADDED
@@ -0,0 +1,36 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+  <meta charset="UTF-8">
+  <meta name="viewport" content="width=device-width, initial-scale=1.0">
+  <title>Text Classification Form</title>
+</head>
+<body>
+  <main>
+    <section id="text-class">
+      <h2>Text classification using distilbert-base-uncased</h2>
+      <p>
+        Model:
+        <a
+          href="https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english"
+          rel="noreferrer"
+          target="_blank">distilbert-base-uncased-finetuned-sst-2-english
+        </a>
+      </p>
+      <form class="text-class-form" id="textClassForm">
+        <label for="text-class-input">Text prompt:</label>
+        <input
+          id="text-class-input"
+          name="text"
+          type="text"
+          placeholder="I am feeling lucky today"
+        />
+        <button type="submit" id="text-class-submit">Submit</button>
+      </form>
+      <p class="text-class-output"></p>
+      <!-- script.js writes the class probabilities here -->
+      <p class="probability-output"></p>
+    </section>
+  </main>
+
+  <script src="static/script.js"></script>
+</body>
+</html>
static/script.js
ADDED
@@ -0,0 +1,35 @@
+document.addEventListener("DOMContentLoaded", () => {
+  const textClassForm = document.querySelector(".text-class-form");
+
+  const classifyText = async (text) => {
+    try {
+      // Ensure the endpoint matches your FastAPI setup, including the base URL if needed
+      const response = await fetch(`/classify_text?input=${encodeURIComponent(text)}`);
+      if (!response.ok) {
+        throw new Error('Network response was not ok.');
+      }
+      const data = await response.json();
+      return data; // Return the entire response data
+    } catch (error) {
+      console.error("Error:", error);
+      return { sentiment: "Failed to classify text. Please try again later." };
+    }
+  };
+
+  textClassForm.addEventListener("submit", async (event) => {
+    event.preventDefault();
+    const textClassInput = document.getElementById("text-class-input");
+    const sentimentParagraph = document.querySelector(".text-class-output");
+    const probabilityParagraph = document.querySelector(".probability-output"); // Paragraph for displaying probabilities
+
+    // Display a loading message or similar feedback
+    sentimentParagraph.textContent = "Classifying text...";
+
+    const { sentiment, positive_probability, negative_probability } = await classifyText(textClassInput.value);
+
+    // Update the text content with the classification result and probabilities
+    sentimentParagraph.textContent = `Sentiment: ${sentiment}`;
+    // The probabilities are undefined when classifyText returns its error fallback, so guard before formatting them
+    if (probabilityParagraph && typeof positive_probability === "number") {
+      probabilityParagraph.textContent = `Positive Probability: ${positive_probability.toFixed(2)}, Negative Probability: ${negative_probability.toFixed(2)}`;
+    }
+  });
+});
static/style.css
ADDED
File without changes