Spaces:
Running
Running
geekyrakshit
commited on
Commit
·
2b2ab5b
1
Parent(s):
a62b646
update: evaluation app
Browse files
application_pages/evaluation_app.py
CHANGED
@@ -6,6 +6,7 @@ import streamlit as st
|
|
6 |
import weave
|
7 |
from dotenv import load_dotenv
|
8 |
|
|
|
9 |
from guardrails_genie.llm import OpenAIModel
|
10 |
from guardrails_genie.metrics import AccuracyMetric
|
11 |
|
@@ -18,37 +19,44 @@ def initialize_session_state():
|
|
18 |
st.session_state.uploaded_file = None
|
19 |
if "dataset_name" not in st.session_state:
|
20 |
st.session_state.dataset_name = ""
|
21 |
-
if "
|
22 |
-
st.session_state.
|
23 |
if "dataset_ref" not in st.session_state:
|
24 |
st.session_state.dataset_ref = None
|
25 |
if "dataset_previewed" not in st.session_state:
|
26 |
st.session_state.dataset_previewed = False
|
27 |
-
if "
|
28 |
-
st.session_state.
|
29 |
-
if "
|
30 |
-
st.session_state.
|
31 |
if "start_evaluation" not in st.session_state:
|
32 |
st.session_state.start_evaluation = False
|
33 |
if "evaluation_summary" not in st.session_state:
|
34 |
st.session_state.evaluation_summary = None
|
|
|
|
|
35 |
|
36 |
|
37 |
def initialize_guardrail():
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
|
|
|
|
|
|
|
|
|
|
52 |
|
53 |
|
54 |
initialize_session_state()
|
@@ -60,8 +68,8 @@ uploaded_file = st.sidebar.file_uploader(
|
|
60 |
st.session_state.uploaded_file = uploaded_file
|
61 |
dataset_name = st.sidebar.text_input("Evaluation dataset name", value="")
|
62 |
st.session_state.dataset_name = dataset_name
|
63 |
-
|
64 |
-
st.session_state.
|
65 |
|
66 |
if st.session_state.uploaded_file is not None and st.session_state.dataset_name != "":
|
67 |
with st.expander("Evaluation Dataset Preview", expanded=True):
|
@@ -79,16 +87,15 @@ if st.session_state.uploaded_file is not None and st.session_state.dataset_name
|
|
79 |
f"Dataset published to [**Weave**](https://wandb.ai/{entity}/{project}/weave/objects/{dataset_name}/versions/{digest})"
|
80 |
)
|
81 |
|
82 |
-
if
|
83 |
st.dataframe(dataframe)
|
84 |
|
85 |
st.session_state.dataset_previewed = True
|
86 |
|
87 |
if st.session_state.dataset_previewed:
|
88 |
-
|
89 |
-
"Select
|
90 |
-
options=[
|
91 |
-
+ [
|
92 |
cls_name
|
93 |
for cls_name, cls_obj in vars(
|
94 |
import_module("guardrails_genie.guardrails")
|
@@ -96,11 +103,11 @@ if st.session_state.dataset_previewed:
|
|
96 |
if isinstance(cls_obj, type) and cls_name != "GuardrailManager"
|
97 |
],
|
98 |
)
|
99 |
-
st.session_state.
|
100 |
|
101 |
-
if st.session_state.
|
102 |
initialize_guardrail()
|
103 |
-
if st.session_state.
|
104 |
if st.sidebar.button("Start Evaluation"):
|
105 |
st.session_state.start_evaluation = True
|
106 |
if st.session_state.start_evaluation:
|
@@ -110,9 +117,12 @@ if st.session_state.dataset_previewed:
|
|
110 |
streamlit_mode=True,
|
111 |
)
|
112 |
with st.expander("Evaluation Results", expanded=True):
|
113 |
-
evaluation_summary = asyncio.run(
|
114 |
-
evaluation.evaluate(
|
|
|
|
|
115 |
)
|
|
|
116 |
st.write(evaluation_summary)
|
117 |
st.session_state.evaluation_summary = evaluation_summary
|
118 |
st.session_state.start_evaluation = False
|
|
|
6 |
import weave
|
7 |
from dotenv import load_dotenv
|
8 |
|
9 |
+
from guardrails_genie.guardrails import GuardrailManager
|
10 |
from guardrails_genie.llm import OpenAIModel
|
11 |
from guardrails_genie.metrics import AccuracyMetric
|
12 |
|
|
|
19 |
st.session_state.uploaded_file = None
|
20 |
if "dataset_name" not in st.session_state:
|
21 |
st.session_state.dataset_name = ""
|
22 |
+
if "preview_in_app" not in st.session_state:
|
23 |
+
st.session_state.preview_in_app = False
|
24 |
if "dataset_ref" not in st.session_state:
|
25 |
st.session_state.dataset_ref = None
|
26 |
if "dataset_previewed" not in st.session_state:
|
27 |
st.session_state.dataset_previewed = False
|
28 |
+
if "guardrail_names" not in st.session_state:
|
29 |
+
st.session_state.guardrail_names = []
|
30 |
+
if "guardrails" not in st.session_state:
|
31 |
+
st.session_state.guardrails = []
|
32 |
if "start_evaluation" not in st.session_state:
|
33 |
st.session_state.start_evaluation = False
|
34 |
if "evaluation_summary" not in st.session_state:
|
35 |
st.session_state.evaluation_summary = None
|
36 |
+
if "guardrail_manager" not in st.session_state:
|
37 |
+
st.session_state.guardrail_manager = None
|
38 |
|
39 |
|
40 |
def initialize_guardrail():
|
41 |
+
guardrails = []
|
42 |
+
for guardrail_name in st.session_state.guardrail_names:
|
43 |
+
if guardrail_name == "PromptInjectionSurveyGuardrail":
|
44 |
+
survey_guardrail_model = st.sidebar.selectbox(
|
45 |
+
"Survey Guardrail LLM", ["", "gpt-4o-mini", "gpt-4o"]
|
46 |
+
)
|
47 |
+
if survey_guardrail_model:
|
48 |
+
guardrails.append(
|
49 |
+
getattr(
|
50 |
+
import_module("guardrails_genie.guardrails"),
|
51 |
+
guardrail_name,
|
52 |
+
)(llm_model=OpenAIModel(model_name=survey_guardrail_model))
|
53 |
+
)
|
54 |
+
else:
|
55 |
+
guardrails.append(
|
56 |
+
getattr(import_module("guardrails_genie.guardrails"), guardrail_name)()
|
57 |
+
)
|
58 |
+
st.session_state.guardrails = guardrails
|
59 |
+
st.session_state.guardrail_manager = GuardrailManager(guardrails=guardrails)
|
60 |
|
61 |
|
62 |
initialize_session_state()
|
|
|
68 |
st.session_state.uploaded_file = uploaded_file
|
69 |
dataset_name = st.sidebar.text_input("Evaluation dataset name", value="")
|
70 |
st.session_state.dataset_name = dataset_name
|
71 |
+
preview_in_app = st.sidebar.toggle("Preview in app", value=False)
|
72 |
+
st.session_state.preview_in_app = preview_in_app
|
73 |
|
74 |
if st.session_state.uploaded_file is not None and st.session_state.dataset_name != "":
|
75 |
with st.expander("Evaluation Dataset Preview", expanded=True):
|
|
|
87 |
f"Dataset published to [**Weave**](https://wandb.ai/{entity}/{project}/weave/objects/{dataset_name}/versions/{digest})"
|
88 |
)
|
89 |
|
90 |
+
if preview_in_app:
|
91 |
st.dataframe(dataframe)
|
92 |
|
93 |
st.session_state.dataset_previewed = True
|
94 |
|
95 |
if st.session_state.dataset_previewed:
|
96 |
+
guardrail_names = st.sidebar.multiselect(
|
97 |
+
"Select Guardrails",
|
98 |
+
options=[
|
|
|
99 |
cls_name
|
100 |
for cls_name, cls_obj in vars(
|
101 |
import_module("guardrails_genie.guardrails")
|
|
|
103 |
if isinstance(cls_obj, type) and cls_name != "GuardrailManager"
|
104 |
],
|
105 |
)
|
106 |
+
st.session_state.guardrail_names = guardrail_names
|
107 |
|
108 |
+
if st.session_state.guardrail_names != []:
|
109 |
initialize_guardrail()
|
110 |
+
if st.session_state.guardrail_manager is not None:
|
111 |
if st.sidebar.button("Start Evaluation"):
|
112 |
st.session_state.start_evaluation = True
|
113 |
if st.session_state.start_evaluation:
|
|
|
117 |
streamlit_mode=True,
|
118 |
)
|
119 |
with st.expander("Evaluation Results", expanded=True):
|
120 |
+
evaluation_summary, call = asyncio.run(
|
121 |
+
evaluation.evaluate.call(
|
122 |
+
evaluation, st.session_state.guardrail_manager
|
123 |
+
)
|
124 |
)
|
125 |
+
st.markdown(f"[Explore evaluation in Weave]({call.ui_url})")
|
126 |
st.write(evaluation_summary)
|
127 |
st.session_state.evaluation_summary = evaluation_summary
|
128 |
st.session_state.start_evaluation = False
|
guardrails_genie/guardrails/injection/protectai_guardrail.py
CHANGED
@@ -23,7 +23,7 @@ class PromptInjectionProtectAIGuardrail(Guardrail):
|
|
23 |
max_length=512,
|
24 |
device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
|
25 |
)
|
26 |
-
|
27 |
@weave.op()
|
28 |
def classify(self, prompt: str):
|
29 |
return self._classifier(prompt)
|
|
|
23 |
max_length=512,
|
24 |
device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
|
25 |
)
|
26 |
+
|
27 |
@weave.op()
|
28 |
def classify(self, prompt: str):
|
29 |
return self._classifier(prompt)
|
guardrails_genie/guardrails/manager.py
CHANGED
@@ -1,20 +1,28 @@
|
|
1 |
import weave
|
2 |
from rich.progress import track
|
3 |
-
from weave.flow.obj import Object as WeaveObject
|
4 |
|
5 |
from .base import Guardrail
|
6 |
|
7 |
|
8 |
-
class GuardrailManager(
|
9 |
guardrails: list[Guardrail]
|
10 |
|
11 |
@weave.op()
|
12 |
-
def guard(self, prompt: str, **kwargs) -> dict:
|
13 |
alerts, safe = [], True
|
14 |
-
|
|
|
|
|
|
|
|
|
|
|
15 |
response = guardrail.guard(prompt, **kwargs)
|
16 |
alerts.append(
|
17 |
{"guardrail_name": guardrail.__class__.__name__, "response": response}
|
18 |
)
|
19 |
safe = safe and response["safe"]
|
20 |
return {"safe": safe, "alerts": alerts}
|
|
|
|
|
|
|
|
|
|
1 |
import weave
|
2 |
from rich.progress import track
|
|
|
3 |
|
4 |
from .base import Guardrail
|
5 |
|
6 |
|
7 |
+
class GuardrailManager(weave.Model):
|
8 |
guardrails: list[Guardrail]
|
9 |
|
10 |
@weave.op()
|
11 |
+
def guard(self, prompt: str, progress_bar: bool = True, **kwargs) -> dict:
|
12 |
alerts, safe = [], True
|
13 |
+
iterable = (
|
14 |
+
track(self.guardrails, description="Running guardrails")
|
15 |
+
if progress_bar
|
16 |
+
else self.guardrails
|
17 |
+
)
|
18 |
+
for guardrail in iterable:
|
19 |
response = guardrail.guard(prompt, **kwargs)
|
20 |
alerts.append(
|
21 |
{"guardrail_name": guardrail.__class__.__name__, "response": response}
|
22 |
)
|
23 |
safe = safe and response["safe"]
|
24 |
return {"safe": safe, "alerts": alerts}
|
25 |
+
|
26 |
+
@weave.op()
|
27 |
+
def predict(self, prompt: str, **kwargs) -> dict:
|
28 |
+
return self.guard(prompt, progress_bar=False, **kwargs)
|
guardrails_genie/metrics.py
CHANGED
@@ -17,11 +17,6 @@ class AccuracyMetric(weave.Scorer):
|
|
17 |
count_true = list(valid_data).count(True)
|
18 |
int_data = [int(x) for x in valid_data]
|
19 |
|
20 |
-
sample_mean = np.mean(int_data) if int_data else 0
|
21 |
-
sample_variance = np.var(int_data) if int_data else 0
|
22 |
-
sample_error = np.sqrt(sample_variance / len(int_data)) if int_data else 0
|
23 |
-
|
24 |
-
# Calculate precision, recall, and F1 score
|
25 |
true_positives = count_true
|
26 |
false_positives = len(valid_data) - count_true
|
27 |
false_negatives = len(score_rows) - len(valid_data)
|
@@ -43,7 +38,7 @@ class AccuracyMetric(weave.Scorer):
|
|
43 |
)
|
44 |
|
45 |
return {
|
46 |
-
"accuracy": float(
|
47 |
"precision": precision,
|
48 |
"recall": recall,
|
49 |
"f1_score": f1_score,
|
|
|
17 |
count_true = list(valid_data).count(True)
|
18 |
int_data = [int(x) for x in valid_data]
|
19 |
|
|
|
|
|
|
|
|
|
|
|
20 |
true_positives = count_true
|
21 |
false_positives = len(valid_data) - count_true
|
22 |
false_negatives = len(score_rows) - len(valid_data)
|
|
|
38 |
)
|
39 |
|
40 |
return {
|
41 |
+
"accuracy": float(np.mean(int_data) if int_data else 0),
|
42 |
"precision": precision,
|
43 |
"recall": recall,
|
44 |
"f1_score": f1_score,
|