import gradio as gr
from LLM.LLMGuard.GuardProcessor import (
process_output_with_llmguard,
process_input_with_llmguard,
)
from dotenv import load_dotenv
from LLM.LLamaLLM import get_pipeline, generate_output
# Module-level LLM pipeline; stays None until the __main__ guard assigns it
# via get_pipeline(). run_llm_guard reads this global at call time.
pipeline = None
def run_llm_guard(prompt: str) -> tuple[str, str, str]:
    """
    Run LLMGuard on a prompt: anonymize the input, generate model output,
    and deanonymize the output (both directions use Presidio).

    Args:
        prompt (str): The prompt to process.

    Returns:
        tuple[str, str, str]: (anonymized prompt text, raw model output,
        deanonymized processed output text).
    """
    # Shared vault: placeholders created during input anonymization are
    # restored from it when processing the output.
    regex_vault = {}
    anonymize_result = process_input_with_llmguard(prompt, regex_vault)
    # NOTE(review): reads the module-level `pipeline`, which is only set in
    # the __main__ guard — calling this earlier passes pipeline=None.
    mock_output = generate_output(
        anonymize_result.text,
        pipeline,
    )
    processed_output = process_output_with_llmguard(mock_output, regex_vault)
    return anonymize_result.text, mock_output, processed_output.text
# Single-prompt UI: one input box, and three output panes mirroring the
# pipeline stages (anonymized prompt -> model output -> deanonymized output).
_prompt_input = gr.Textbox(label="Prompt", lines=1)
_stage_outputs = [
    gr.Textbox(label="Processed Anonymized Prompt", lines=1),
    gr.Textbox(label="Model Output", lines=1),
    gr.Textbox(label="Processed Deanonymized Output", lines=1),
]
iface = gr.Interface(
    fn=run_llm_guard,
    inputs=_prompt_input,
    outputs=_stage_outputs,
    title="LLMGuard Tester",
    description="Enter a prompt to generate LLM output and process it with Presidio. Current mode is text generation.",
)
if __name__ == "__main__":
    # Load .env first so any credentials/config are visible to get_pipeline().
    load_dotenv()
    # Rebinds the module-level `pipeline` that run_llm_guard uses.
    pipeline = get_pipeline()
    # Blocks serving the Gradio app.
    iface.launch()