satisfy milestone-2 requirements
- .github/workflows/sync_to_huggingface_hub.yml +20 -0
- README.md +30 -6
- app.py +28 -0
- main.py +0 -3
.github/workflows/sync_to_huggingface_hub.yml
ADDED
@@ -0,0 +1,20 @@
+name: Sync to Hugging Face hub
+on:
+  push:
+    branches: [main]
+
+  # to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+jobs:
+  sync-to-hub:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          lfs: true
+      - name: Push to hub
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        run: git push --force https://cgr28:$HF_TOKEN@huggingface.co/spaces/cgr28/cs482-project main
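
Note: the workflow above authenticates with a repository secret named HF_TOKEN (referenced as secrets.HF_TOKEN). Below is a minimal sketch for checking locally that a Hugging Face access token is valid before relying on the Action, assuming the huggingface_hub package is installed; the file name check_token.py and the HF_TOKEN environment variable are illustrative choices, not part of the commit.

# check_token.py -- hypothetical helper, not part of the commit above.
import os

from huggingface_hub import HfApi  # assumes huggingface_hub is installed

def check_token(token: str) -> None:
    # whoami() raises an error if the token is invalid or expired;
    # otherwise it returns account details for the token's owner.
    info = HfApi().whoami(token=token)
    print(f"Token belongs to: {info.get('name')}")

if __name__ == "__main__":
    # Read the token from the environment rather than hard-coding it.
    check_token(os.environ["HF_TOKEN"])

If the check passes, the same token value can be stored as the HF_TOKEN secret that the workflow's push step consumes.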
README.md
CHANGED
@@ -1,9 +1,33 @@
+---
+title: Cs482 Project
+emoji: 💻
+colorFrom: pink
+colorTo: purple
+sdk: streamlit
+sdk_version: 1.17.0
+app_file: app.py
+pinned: false
+---
+
 # cs482-project
-## Instructions
-1. Setup Docker using this video [https://youtu.be/pTFZFxd4hOI](https://youtu.be/pTFZFxd4hOI)
 
-##
-
+## milestone-1
+
+### Instructions
+
+1. Setup Docker using this [video](https://youtu.be/pTFZFxd4hOI)
+
+### Screenshot
+
+#### Running from container
+
 ![Docker Container](docker-container.png)
-
-
+
+#### Running using docker run
+
+![Docker Run](docker-run.png)
+
+## milestone-2
+
+[HF Space](https://huggingface.co/spaces/cgr28/cs482-project)
+
app.py
ADDED
@@ -0,0 +1,28 @@
+import streamlit as st
+from transformers import AutoTokenizer, RobertaForSequenceClassification
+import numpy as np
+import torch
+
+st.title("CS482 Project Sentiment Analysis")
+
+text = st.text_area(label="Text to be analyzed", value="This sentiment analysis app is great!")
+
+selected_model = st.radio(label="Model", options=["Model 1", "Model 2"])
+
+analyze_button = st.button(label="Analyze")
+
+st.markdown("**:red[Sentiment:]**")
+
+if analyze_button:
+    if selected_model == "Model 1":
+        tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-emotion")
+        model = RobertaForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-emotion")
+    else:
+        tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment-latest")
+        model = RobertaForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment-latest")
+    inputs = tokenizer(text, return_tensors="pt")
+    with torch.no_grad():
+        logits = model(**inputs).logits
+    prediction_id = logits.argmax().item()
+    results = model.config.id2label[prediction_id]
+    st.write(results)
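
The prediction block at the end of app.py keeps only the argmax class and maps it to a label via model.config.id2label. Below is a minimal sketch of how the same logits could instead be turned into per-label probabilities with a softmax (for example, to show a confidence score next to the sentiment); it reuses the "Model 2" checkpoint and the app's default text, and is illustrative only, not part of the commit.

# probabilities.py -- illustrative only, not part of the commit above.
import torch
from transformers import AutoTokenizer, RobertaForSequenceClassification

model_name = "cardiffnlp/twitter-roberta-base-sentiment-latest"  # same checkpoint as "Model 2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = RobertaForSequenceClassification.from_pretrained(model_name)

inputs = tokenizer("This sentiment analysis app is great!", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Softmax turns the raw logits into probabilities that sum to 1 across the labels.
probs = torch.softmax(logits, dim=-1).squeeze()
for label_id, p in enumerate(probs.tolist()):
    print(f"{model.config.id2label[label_id]}: {p:.3f}")

Inside the Streamlit app, the same loop could feed st.write instead of print to display the scores.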
main.py
DELETED
@@ -1,3 +0,0 @@
-import torch
-x = torch.rand(5, 3)
-print(x)