Abineshkumar77 committed on
Commit
c1f251c
·
1 Parent(s): c5fdd87

Add application file

Browse files
Files changed (2) hide show
  1. app.py +7 -17
  2. model_quantized.onnx +3 -0
app.py CHANGED
@@ -1,20 +1,17 @@
1
  from fastapi import FastAPI
2
  from transformers import AutoTokenizer
3
  from optimum.onnxruntime import ORTModelForSequenceClassification
4
- from optimum.onnxruntime import ORTQuantizer
5
- from optimum.onnxruntime.configuration import AutoQuantizationConfig
6
  import time
7
 
8
# optimum's pipeline wrapper accepts ORTModel instances directly; the
# original code referenced `pipeline` without ever importing it (NameError
# at import time).
from optimum.pipelines import pipeline

# FastAPI application serving quantized-ONNX sentiment analysis.
app = FastAPI()

# Directory holding the exported ONNX model and tokenizer files.
# NOTE: optimum's from_pretrained expects the model *directory* (plus an
# optional file_name selecting the weights), not a direct path to the
# .onnx file itself.
model_path = "./model_onnx"
tokenizer_path = "./model_onnx"

model = ORTModelForSequenceClassification.from_pretrained(
    model_path, file_name="model_quantized.onnx"
)
tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)

pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
17
 
 
 
18
  def preprocess_tweet(tweet: str) -> str:
19
  tweet_words = []
20
  for word in tweet.split(' '):
@@ -31,34 +28,27 @@ def home():
31
 
32
  @app.get("/analyze")
33
  def analyze_sentiment(tweet: str):
34
- # Preprocess the tweet
35
  tweet_proc = preprocess_tweet(tweet)
36
-
37
- # Measure the time taken for the inference
38
  start_time = time.time()
39
 
40
- # Use the pipeline to get the sentiment analysis result
41
  results = pipe(tweet_proc, return_all_scores=True)
42
 
43
- # Calculate the inference time
44
  inference_time = time.time() - start_time
45
 
46
- # Map the labels to desired names
47
  label_map = {
48
  "LABEL_0": "Negative",
49
  "LABEL_1": "Neutral",
50
  "LABEL_2": "Positive"
51
  }
52
 
53
- # Find the label with the highest score
54
  highest_score_result = max(results[0], key=lambda x: x['score'])
55
  highest_label = label_map[highest_score_result['label']]
56
  highest_score = round(highest_score_result['score'], 4)
57
 
58
- # Return the original tweet, the label with the highest score, and the inference time
59
  return {
60
  "text": tweet,
61
  "label": highest_label,
62
  "score": highest_score,
63
- "inference_time": round(inference_time, 4) # In seconds
64
  }
 
1
  from fastapi import FastAPI
2
  from transformers import AutoTokenizer
3
  from optimum.onnxruntime import ORTModelForSequenceClassification
4
+ from optimum.pipelines import pipeline
 
5
  import time
6
 
7
# Initialize the tokenizer and ONNX model.
# NOTE: optimum's from_pretrained expects the model *directory* (plus an
# optional file_name selecting the weights), not a direct path to the
# .onnx file itself.
model_path = "./model_onnx"

tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

model = ORTModelForSequenceClassification.from_pretrained(
    model_path, file_name="model_quantized.onnx"
)

pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)

app = FastAPI()
15
  def preprocess_tweet(tweet: str) -> str:
16
  tweet_words = []
17
  for word in tweet.split(' '):
 
28
 
29
  @app.get("/analyze")
30
  def analyze_sentiment(tweet: str):
 
31
  tweet_proc = preprocess_tweet(tweet)
32
+
 
33
  start_time = time.time()
34
 
 
35
  results = pipe(tweet_proc, return_all_scores=True)
36
 
 
37
  inference_time = time.time() - start_time
38
 
 
39
  label_map = {
40
  "LABEL_0": "Negative",
41
  "LABEL_1": "Neutral",
42
  "LABEL_2": "Positive"
43
  }
44
 
 
45
  highest_score_result = max(results[0], key=lambda x: x['score'])
46
  highest_label = label_map[highest_score_result['label']]
47
  highest_score = round(highest_score_result['score'], 4)
48
 
 
49
  return {
50
  "text": tweet,
51
  "label": highest_label,
52
  "score": highest_score,
53
+ "inference_time": round(inference_time, 4)
54
  }
model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4fb016b6fed49bb877eee7cec2da0a28e6703d200741d19547c06c59c6993078
3
+ size 1453