ashwml committed on
Commit
bbbcb2c
1 Parent(s): 8a1b327

Update app.py

Files changed (1):
  app.py +49 -17
app.py CHANGED
@@ -6,7 +6,7 @@ import json
 import numpy as np
 # from fastapi import FastAPI,Response
 # from sklearn.metrics import accuracy_score, f1_score
-# import prometheus_client as prom
+import prometheus_client as prom
 import pandas as pd
 # import uvicorn
 import os
@@ -21,26 +21,58 @@ import torch
 # app=FastAPI()
 
 
-# test_data=pd.read_csv("test.csv")
+test_data = pd.read_csv("test.csv")
 
 
-# f1_metric = prom.Gauge('death_f1_score', 'F1 score for test samples')
+f1_metric = prom.Gauge('bertscore_f1_score', 'F1 score for captions')
 
 # Function for updating metrics
-# def update_metrics():
-#     test = test_data.sample(20)
-#     X = test.iloc[:, :-1].values
-#     y = test['DEATH_EVENT'].values
-
-# #    test_text = test['Text'].values
-#     test_pred = loaded_model.predict(X)
-#     #pred_labels = [int(pred['label'].split("_")[1]) for pred in test_pred]
-
-#     f1 = f1_score( y , test_pred).round(3)
-
-#     #f1 = f1_score(test['labels'], pred_labels).round(3)
-
-#     f1_metric.set(f1)
+def update_metrics():
+    # Leftover scaffolding from the earlier tabular (DEATH_EVENT) model,
+    # kept commented out:
+    # test = test_data.sample(20)
+    # X = test.iloc[:, :-1].values
+    # y = test['DEATH_EVENT'].values
+    # test_text = test['Text'].values
+    # test_pred = loaded_model.predict(X)
+    # pred_labels = [int(pred['label'].split("_")[1]) for pred in test_pred]
+    # f1 = f1_score(y, test_pred).round(3)
+    # f1 = f1_score(test['labels'], pred_labels).round(3)
+    # f1_metric.set(f1)
+
+    # Uncommented: this dict is written to below.
+    dict_metric_scores = {}
+
+    # eval_pred, tokenizer and dict_metrics are assumed to be defined
+    # elsewhere in app.py.
+    labels_ids = eval_pred.label_ids
+    pred_ids = eval_pred.predictions
+
+    # All unnecessary tokens are removed: -100 marks ignored label
+    # positions and must be mapped back to the pad token id before decoding.
+    pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
+    labels_ids[labels_ids == -100] = tokenizer.pad_token_id
+    label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
+
+    # Calculating various metrics: ROUGE-2 over the decoded caption pairs.
+    rouge_output = dict_metrics["rouge2"].compute(predictions=pred_str, references=label_str, rouge_types=["rouge2"])
+    dict_metric_scores["rouge2_score"] = rouge_output['rouge2']
+
+    # BERTScore returns one F1 per prediction; reduce to a scalar mean
+    # before exposing it through the Prometheus gauge (bertscore or rouge
+    # could back this gauge; BERTScore F1 is used here).
+    bertscore_output = dict_metrics["bertscore"].compute(predictions=pred_str, references=label_str, lang="en")
+    bert_f1_metric = float(np.mean(bertscore_output['f1']))
+    f1_metric.set(bert_f1_metric)
+
+    # return dict_metric_scores
 
 
 
@@ -101,10 +133,10 @@ def predict_event(image):
 
 
 
-# @app.get("/metrics")
-# async def get_metrics():
-#     update_metrics()
-#     return Response(media_type="text/plain", content= prom.generate_latest())
+@app.get("/metrics")
+async def get_metrics():
+    update_metrics()
+    return Response(media_type="text/plain", content=prom.generate_latest())
 
 
 
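The new update_metrics() leans on three names this diff never defines: eval_pred, tokenizer, and dict_metrics. The .label_ids / .predictions pair matches the EvalPrediction object that a transformers Trainer passes to a compute_metrics callback, which suggests the function was adapted from one. Below is a minimal sketch of how the two metrics could be loaded with the Hugging Face evaluate library; dict_metrics is the commit's own name, while the sample captions are made up for illustration:

# Sketch, assuming the evaluate library backs dict_metrics.
import evaluate

dict_metrics = {
    "rouge2": evaluate.load("rouge"),        # rouge_types is picked at compute() time
    "bertscore": evaluate.load("bertscore"), # downloads a scoring model on first use
}

# Hypothetical decoded captions standing in for pred_str / label_str:
preds = ["a dog runs along the beach"]
refs = ["a dog running on the beach"]

rouge2 = dict_metrics["rouge2"].compute(
    predictions=preds, references=refs, rouge_types=["rouge2"]
)["rouge2"]
bert_f1 = dict_metrics["bertscore"].compute(
    predictions=preds, references=refs, lang="en"
)["f1"]  # a list: one F1 score per prediction
print(rouge2, sum(bert_f1) / len(bert_f1))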
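For reference, here is the /metrics pattern the final hunk enables, as a self-contained sketch. The unchanged context in app.py still shows # app=FastAPI() and the fastapi imports commented out, so a real FastAPI app object is assumed here, and the gauge value is a placeholder rather than anything the commit computes:

# Sketch, assuming a standalone FastAPI app exposing a Prometheus gauge.
import prometheus_client as prom
from fastapi import FastAPI, Response

app = FastAPI()
f1_metric = prom.Gauge('bertscore_f1_score', 'F1 score for captions')

@app.get("/metrics")
async def get_metrics():
    f1_metric.set(0.87)  # placeholder; app.py refreshes this via update_metrics()
    # generate_latest() renders every registered metric in the
    # Prometheus text exposition format.
    return Response(media_type="text/plain", content=prom.generate_latest())

A Prometheus server can then scrape this endpoint directly; the text exposition format needs no further serialization.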