Spaces: Running on Zero
Martín Santillán Cooper committed
Commit • 33193a0 • 1 Parent(s): 4ef9518

configure rag editable inputs

Files changed:
- src/app.py +44 -4
- src/model.py +1 -0
src/app.py CHANGED

@@ -27,18 +27,58 @@ def on_test_case_click(state: gr.State):
     selected_sub_catalog = state['selected_sub_catalog']
     selected_criteria_name = state['selected_criteria_name']
     selected_test_case = state['selected_test_case']
+
     logger.debug(f'Changing to test case "{selected_criteria_name}" from catalog "{selected_sub_catalog}".')
+
+    is_context_iditable = selected_criteria_name == 'context_relevance'
+    is_user_message_editable = selected_sub_catalog == 'harmful_content_in_user_prompt'
+    is_assistant_message_editable = selected_sub_catalog == 'harmful_content_in_assistant_response' or \
+        selected_criteria_name == 'groundedness' or \
+        selected_criteria_name == 'answer_relevance'
     return {
         test_case_name: f'<h2>{to_title_case(selected_test_case["name"])}</h2>',
         criteria: selected_test_case['criteria'],
-        context: gr.update(
-
-
+        context: gr.update(
+            value=selected_test_case['context'],
+            interactive=True,
+            visible=True,
+            elem_classes=['input-box']
+        ) if is_context_iditable else gr.update(
+            visible=selected_test_case['context'] is not None,
+            value=selected_test_case['context'],
+            interactive=False,
+            elem_classes=['read-only', 'input-box']
+        ),
+        user_message: gr.update(
+            value=selected_test_case['user_message'],
+            visible=True,
+            interactive=True,
+            elem_classes=['input-box']
+        ) if is_user_message_editable else gr.update(
+            value=selected_test_case['user_message'],
+            interactive=False,
+            elem_classes=['read-only', 'input-box']
+        ),
+        assistant_message: gr.update(
+            value=selected_test_case['assistant_message'],
+            visible=True,
+            interactive=True,
+            elem_classes=['input-box']
+        ) if is_assistant_message_editable else gr.update(
+            visible=selected_test_case['assistant_message'] is not None,
+            value=selected_test_case['assistant_message'],
+            interactive=False,
+            elem_classes=['read-only', 'input-box']
+        ),
         result_text: gr.update(visible=False, value='')
     }
 
 def change_button_color(event: gr.EventData):
-    return [gr.update(
+    return [gr.update(
+        elem_classes=['catalog-button', 'selected']
+    ) if v.elem_id == event.target.elem_id else gr.update(
+        elem_classes=['catalog-button']
+    ) for c in catalog_buttons.values() for v in c.values()]
 
 def on_submit(criteria, context, user_message, assistant_message, state):
     criteria_name = state['selected_criteria_name']
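Two patterns in this commit are worth calling out. The first is the conditional `gr.update(...) if editable else gr.update(...)` shape used for `context`, `user_message`, and `assistant_message`. Below is a minimal, self-contained sketch of that toggle, assuming Gradio 4.x; the `render_field` helper and the demo wiring are illustrative, not code from this Space.

import gradio as gr

def render_field(value, editable):
    # Editable fields get the 'input-box' style; read-only fields add a
    # 'read-only' class and hide themselves when there is nothing to show,
    # mirroring the branches in on_test_case_click above.
    if editable:
        return gr.update(value=value, visible=True, interactive=True,
                         elem_classes=['input-box'])
    return gr.update(value=value, visible=value is not None, interactive=False,
                     elem_classes=['read-only', 'input-box'])

with gr.Blocks() as demo:
    context = gr.Textbox(label='Context')
    load = gr.Button('Load test case')
    # Flip editable to False to see the read-only rendering.
    load.click(lambda: render_field('a retrieved passage', editable=True),
               outputs=context)

if __name__ == '__main__':
    demo.launch()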
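The second is `change_button_color`, which emits one update per catalog button so that only the clicked button keeps the 'selected' class. A hedged sketch of the same idea, assuming a flat dict of buttons rather than the Space's nested `catalog_buttons`:

import gradio as gr

def highlight_selected(event: gr.EventData, buttons: dict):
    # Return updates in a stable order; the button whose elem_id matches the
    # event target gets the 'selected' class, every other button loses it.
    return [gr.update(elem_classes=['catalog-button', 'selected'])
            if btn.elem_id == event.target.elem_id
            else gr.update(elem_classes=['catalog-button'])
            for btn in buttons.values()]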
src/model.py CHANGED

@@ -13,6 +13,7 @@ if not mock_model_call:
     from vllm import LLM, SamplingParams
     from transformers import AutoTokenizer
     model_path = os.getenv('MODEL_PATH') #"granite-guardian-3b-pipecleaner-r241024a"
+    logger.debug(f"model_path is {model_path}")
     tokenizer = AutoTokenizer.from_pretrained(model_path)
     sampling_params = SamplingParams(temperature=0.0, logprobs=nlogprobs)
     model = LLM(model=model_path, tensor_parallel_size=1)