jvroo commited on
Commit
7635677
·
1 Parent(s): 97b8dd0
.gitattributes ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ myvenv filter=lfs diff=lfs merge=lfs -text
2
+ myenv filter=lfs diff=lfs merge=lfs -text
.github/workflows/check.yml ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# CI: run the pytest suite for every push and pull request targeting main.
name: Test Model

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'

      - name: Install dependencies
        run: |
          pip install -r requirements.txt
          pip install pytest

      - name: Run tests
        run: pytest
31
+
.github/workflows/discord_notification.yml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Posts a Discord message for every push to main.
name: Discord Notification
on:
  push:
    branches: [main]

jobs:
  notify:
    runs-on: ubuntu-latest

    steps:
      # Fixes from review:
      #  - "-name" was missing the space YAML list-item syntax requires
      #  - "run:" needed a block scalar ("|") for the multi-line command
      #  - the JSON payload's inner double quotes were unescaped, so the
      #    shell produced invalid JSON
      #  - the env var is declared as DISCORD_WEBHOOK_URL but the command
      #    referenced $DISCORD_WEBHOOK (always empty)
      - name: Send Discord notification
        env:
          DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_WEBHOOK }}
        run: |
          curl -X POST -H "Content-Type: application/json" \
            -d "{\"content\": \"A new commit was pushed to the main branch by $GITHUB_ACTOR. 😄 CS553_CaseStudy_Group 11 rock!\"}" \
            "$DISCORD_WEBHOOK_URL"
.github/workflows/main.yml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Mirrors this repository to the Hugging Face Space on every push to main.
name: Sync to Hugging Face hub
on:
  push:
    branches: [main]

  # to run this workflow manually from the Actions tab
  workflow_dispatch:

jobs:
  sync-to-hub:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          # Full history is required: the Space remote rejects shallow pushes.
          fetch-depth: 0
      - name: Add remote
        env:
          CaseStudy1: ${{ secrets.CaseStudy1 }}
        run: git remote add space https://kroohaniwpi:$CaseStudy1@huggingface.co/spaces/ML-OPS-Grp11/CaseStudy1
      - name: Push to hub
        env:
          CaseStudy1: ${{ secrets.CaseStudy1 }}
        # Bug fix: push an explicit "main" refspec — actions/checkout leaves a
        # detached HEAD, so a bare `git push --force <url>` has nothing to push.
        run: git push --force https://kroohaniwpi:$CaseStudy1@huggingface.co/spaces/ML-OPS-Grp11/CaseStudy1 main
.gitignore CHANGED
@@ -160,3 +160,15 @@ cython_debug/
160
  # and can be added to the global gitignore or merged into this file. For a more nuclear
161
  # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162
  #.idea/
 
 
 
 
 
 
 
 
 
 
 
 
 
160
  # and can be added to the global gitignore or merged into this file. For a more nuclear
161
  # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162
  #.idea/
163
+
164
+
165
+ # Virtual Environment
166
+ myenv/
167
+ *.myenv/
168
+ .myenv/
169
+ env/
170
+ *.env/
171
+ .env/
172
+ venv/
173
+ *.venv/
174
+ .venv/
README.md CHANGED
@@ -1 +1,20 @@
1
- # CS553CaseStudy1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: CaseStudy1
3
+ emoji: 💬
4
+ colorFrom: yellow
5
+ colorTo: purple
6
+ sdk: gradio
7
+ sdk_version: 4.36.1
8
+ app_file: app.py
9
+ pinned: false
10
+ license: mit
11
+ ---
12
+
13
+ An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
14
+
15
+ Please note that the Makefile and GitHub Actions workflow were adapted from https://github.com/nogibjj/hugging-face
16
+
17
+ A pipeline has been added to https://github.com/jvroo/CS553CaseStudy1.git
18
+
19
+ This is for Group 11.
20
+ Authors: Keon Roohani, Shipra Poojary, Jagruti Chitte.
app.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import pipeline
3
+ import matplotlib.pyplot as plt
4
+ import numpy as np
5
+
6
+ # Define models for local and remote inference
7
+ local_model = "distilbert-base-uncased-finetuned-sst-2-english"
8
+ remote_model = "distilbert-base-uncased-finetuned-sst-2-english" # You can use the same model for both for now
9
+
10
+ # Load the local sentiment analysis pipeline with the specified model
11
+ local_pipeline = pipeline("sentiment-analysis", model=local_model)
12
+
13
+ # Initialize the inference pipeline for remote model (same model for simplicity here)
14
+ remote_pipeline = pipeline("sentiment-analysis", model=remote_model)
15
+
16
# Helper: classify a review with the locally loaded pipeline.
def local_sentiment_analysis(review):
    """Classify *review* with the local sentiment pipeline.

    Returns:
        tuple: (label, confidence) on success, or an "Error: ..." string
        paired with 0.0 if the pipeline raises.
    """
    try:
        prediction = local_pipeline(review)[0]
        label = prediction['label']
        confidence = prediction['score']
    except Exception as e:
        return f"Error: {str(e)}", 0.0
    return label, confidence
25
+
26
# Helper: classify a review via the "remote" pipeline (same model for now).
def remote_sentiment_analysis(review):
    """Classify *review* with the remote sentiment pipeline.

    Returns:
        tuple: (label, confidence) on success, or an "Error: ..." string
        paired with 0.0 if the pipeline raises.
    """
    try:
        prediction = remote_pipeline(review)[0]
        label = prediction['label']
        confidence = prediction['score']
    except Exception as e:
        return f"Error: {str(e)}", 0.0
    return label, confidence
35
+
36
# Analyze a review with the selected backend and build a result string + chart.
def analyze_sentiment(review, mode):
    """Run sentiment analysis on *review* using the selected *mode*.

    Args:
        review: Free-text movie review; must be non-empty after stripping.
        mode: "Local Pipeline" or "Inference API".

    Returns:
        tuple: (result text, matplotlib Figure) on success, or
        (error message, None) on empty input, invalid mode, or pipeline error.
    """
    if not review.strip():
        return "Error: Review text cannot be empty.", None

    if mode == "Local Pipeline":
        sentiment, score = local_sentiment_analysis(review)
        model_info = f"Using local model: {local_model}"
    elif mode == "Inference API":
        sentiment, score = remote_sentiment_analysis(review)
        model_info = f"Using remote model: {remote_model}"
    else:
        return "Invalid mode selected.", None

    # Bug fix: the helpers signal failure by returning an "Error: ..." string
    # as the label; surface it instead of plotting a meaningless chart.
    if isinstance(sentiment, str) and sentiment.startswith("Error:"):
        return sentiment, None

    # Format the sentiment result
    result_text = f"Sentiment: {sentiment}, Confidence: {score:.2f}\n{model_info}"

    # Enhanced plot
    fig, ax = plt.subplots(figsize=(8, 5))

    categories = ['POSITIVE', 'NEGATIVE']
    # Confidence of the predicted class; 1 - score for the other class.
    sentiment_scores = [score if sentiment == 'POSITIVE' else (1 - score),
                        score if sentiment == 'NEGATIVE' else (1 - score)]
    # Bug fix: the POSITIVE bar was drawn red whenever the prediction was
    # NEGATIVE (both bars red). Use fixed colors: green = positive, red = negative.
    colors = ['#4CAF50', '#F44336']

    bars = ax.bar(categories, sentiment_scores, color=colors)
    ax.set_ylim(0, 1)
    ax.set_ylabel('Confidence Score')
    ax.set_title('Sentiment Analysis Result')

    # Add text labels above bars
    for bar in bars:
        height = bar.get_height()
        ax.annotate(f'{height:.2f}',
                    xy=(bar.get_x() + bar.get_width() / 2, height),
                    xytext=(0, 3),  # 3 points vertical offset
                    textcoords="offset points",
                    ha='center', va='bottom')

    return result_text, fig  # Return the Matplotlib figure directly
75
+
76
+ # Custom CSS for styling
77
+ custom_css = """
78
+ body {
79
+ background-color: #2c2f33;
80
+ color: #f0f0f0;
81
+ font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
82
+ }
83
+
84
+ .gr-textbox, .gr-radio {
85
+ margin-bottom: 20px;
86
+ border: 1px solid #444;
87
+ padding: 10px;
88
+ border-radius: 8px;
89
+ box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
90
+ background-color: #3a3d41;
91
+ }
92
+
93
+ .gr-button {
94
+ background-color: #7289da;
95
+ color: white;
96
+ border: none;
97
+ padding: 10px 20px;
98
+ font-size: 16px;
99
+ cursor: pointer;
100
+ transition: 0.3s;
101
+ border-radius: 8px;
102
+ margin-top: 10px;
103
+ }
104
+
105
+ .gr-button:hover {
106
+ background-color: #5b6eae;
107
+ }
108
+
109
+ #component-2 {
110
+ font-size: 18px;
111
+ margin-bottom: 20px;
112
+ }
113
+
114
+ #component-3 {
115
+ font-size: 18px;
116
+ margin-bottom: 20px;
117
+ }
118
+
119
+ #component-4 {
120
+ font-size: 16px;
121
+ padding: 15px;
122
+ background-color: #3a3d41;
123
+ border: 1px solid #444;
124
+ border-radius: 8px;
125
+ box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
126
+ }
127
+
128
+ h1 {
129
+ text-align: center;
130
+ font-size: 32px;
131
+ margin-bottom: 40px;
132
+ color: #7289da;
133
+ }
134
+ """
135
+
136
+ # Gradio interface
137
+ with gr.Blocks(css=custom_css) as demo:
138
+ gr.Markdown("<h1>Movie Review Sentiment Analysis</h1>")
139
+
140
+ with gr.Column():
141
+ with gr.Row():
142
+ review_input = gr.Textbox(
143
+ label="Enter Movie Review", placeholder="Type your movie review here...", lines=4
144
+ )
145
+
146
+ with gr.Row():
147
+ mode_input = gr.Radio(
148
+ ["Local Pipeline", "Inference API"], label="Select Processing Mode", value="Inference API"
149
+ )
150
+
151
+ with gr.Row():
152
+ analyze_button = gr.Button("Analyze Sentiment")
153
+
154
+ # Output box to display the sentiment analysis result
155
+ sentiment_output = gr.Textbox(label="Sentiment Analysis Result", interactive=False)
156
+ # Plot output to display the sentiment score graph
157
+ plot_output = gr.Plot(label="Sentiment Score Graph")
158
+
159
+ analyze_button.click(analyze_sentiment, [review_input, mode_input], [sentiment_output, plot_output])
160
+
161
# Run the Gradio app
if __name__ == "__main__":
    # Bug fix: launch('share=True') passed the *string* 'share=True' as the
    # first positional argument; share must be the keyword boolean for Gradio
    # to create a public share link.
    demo.launch(share=True)
conftest.py ADDED
File without changes
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ huggingface_hub==0.23.*
2
+ gradio==4.39.*
3
+ torch==2.4.*
4
+ transformers==4.43.*
5
+ accelerate==0.33.*
6
+ pytest
tests/test_model.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # test_model.py
2
+ # Unit tests for the local sentiment-analysis pipeline defined in app.py
3
+ import pytest
4
+ from app import local_pipeline # Import the local_pipeline from app.py
5
+
6
def test_local_sentiment_analysis():
    """Smoke-test the local pipeline on a clearly positive review."""
    outputs = local_pipeline("I love this movie!")
    top = outputs[0]
    assert top['label'] in ('POSITIVE', 'NEGATIVE')
    assert 0.0 <= top['score'] <= 1.0
11
+
12
def test_local_sentiment_analysis_new():
    """Test local sentiment analysis"""
    # Run the local sentiment analysis pipeline on a known-positive review.
    result = local_pipeline("I love this movie!")
    top = result[0]

    # The prediction must expose both expected keys.
    assert 'label' in top, "Local result does not contain 'label'"
    assert 'score' in top, "Local result does not contain 'score'"

    # The label must be one of the two supported classes.
    assert top['label'] in ('POSITIVE', 'NEGATIVE'), "Unexpected sentiment label"

    # Confidence must come back as a float.
    assert isinstance(top['score'], float), "Score should be a float"

    # Print result for manual inspection (optional)
    print("Local Analysis Result:", result)
31
+