suryanshp1 committed
Commit · a15c928
1 Parent(s): a173b58

feat: guardrails
- .env.example +4 -1
- GUARDRAILS_SETUP.md +229 -0
- README.md +1 -4
- docker-compose.app-only.yml +1 -0
- docker-compose.yml +1 -0
- requirements.txt +3 -1
- src/langgraphagenticai/guardrails/__init__.py +1 -0
- src/langgraphagenticai/guardrails/guardrails_config.py +105 -0
- src/langgraphagenticai/guardrails/llm_wrapper.py +160 -0
- src/langgraphagenticai/guardrails/validation_service.py +140 -0
- src/langgraphagenticai/llms/groqllm.py +11 -3
- src/langgraphagenticai/main.py +6 -4
- src/langgraphagenticai/ui/streamlitui/display_result.py +11 -1
- src/langgraphagenticai/ui/streamlitui/loadui.py +18 -2
.env.example
CHANGED
@@ -12,4 +12,7 @@ TELEMETRY_ENABLED=true
 LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES=false
 
 # Environment Mode (development/production)
-STREAMLIT_ENV=development
+STREAMLIT_ENV=development
+
+# Guardrails Configuration
+GUARDRAILS_ENABLED=true
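The new `GUARDRAILS_ENABLED` flag is read once at startup by `GuardrailsConfig` (see `guardrails_config.py` further down in this commit); any value other than a case-insensitive `true` disables the guards. A minimal sketch of that parsing logic:

```python
import os

# Mirrors GuardrailsConfig.__init__: defaults to "true", comparison is case-insensitive
enabled = os.getenv("GUARDRAILS_ENABLED", "true").lower() == "true"
print(f"Guardrails enabled: {enabled}")
```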
GUARDRAILS_SETUP.md
ADDED
@@ -0,0 +1,229 @@
# Guardrails AI Integration

This guide explains how Guardrails AI is integrated into your LangGraph application to provide safety, validation, and quality control for LLM interactions.

## 🛡️ What is Guardrails AI?

Guardrails AI is a Python framework that adds safety rails to LLM applications by:
- Validating inputs and outputs
- Detecting toxic/harmful content
- Ensuring output quality and structure
- Preventing sensitive topic discussions
- Adding content moderation

## Features Implemented

### 1. Input Validation
- **Toxic Language Detection**: Prevents harmful input from reaching the LLM
- **Profanity Filtering**: Blocks inappropriate language
- **Length Validation**: Ensures inputs are within reasonable limits (1-2000 chars)

### 2. Output Quality Control
- **Content Length**: Validates response length (10-5000 chars)
- **Reading Time**: Ensures responses are appropriately sized (1-300 seconds reading time)
- **Quality Assurance**: Maintains response quality standards

### 3. Content Moderation
- **Sensitive Topics**: Detects and prevents discussions of harmful topics
- **Safety Filtering**: Blocks violence, hate speech, harassment content
- **Threshold-based Detection**: Configurable sensitivity levels

### 4. Structured Output Validation
- **Schema Validation**: Ensures outputs match expected formats
- **Type Safety**: Validates data types and structures
- **Confidence Scoring**: Adds confidence metrics to responses

## Configuration

### Environment Variables

```env
# Enable/disable Guardrails
GUARDRAILS_ENABLED=true

# Environment mode affects error display
STREAMLIT_ENV=development
```

### Guard Types

The application uses different guards for different scenarios:

1. **Input Safety Guard**: Validates user inputs
2. **Output Quality Guard**: Ensures response quality
3. **Content Moderation Guard**: Filters harmful content
4. **Structured Output Guard**: Validates response format

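These guards are registered under fixed keys in `GuardrailsConfig.guards` and looked up with `get_guard()`. A minimal sketch, assuming the project package is importable from the repository root:

```python
from src.langgraphagenticai.guardrails.guardrails_config import guardrails_config

# get_guard returns a Guard instance, or None when guardrails are disabled
# or that particular guard failed to initialize.
for key in ("input_safety", "output_quality", "content_moderation", "structured_output"):
    guard = guardrails_config.get_guard(key)
    print(key, "->", "ready" if guard else "unavailable")
```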
## Usage

### Automatic Protection

Guardrails are automatically applied to:
- All user inputs before processing
- All LLM outputs before display
- Tool interactions and responses
- MCP server communications

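The wrapping itself happens in `GroqLLM.get_llm_model()`, which hands the selected use case to `create_guardrails_llm()`; when guardrails are disabled the original model is returned untouched. A rough sketch of that call site (the API key and model name are placeholders, not part of this commit):

```python
from langchain_groq import ChatGroq
from src.langgraphagenticai.guardrails.llm_wrapper import create_guardrails_llm

# Real values come from the Streamlit sidebar controls
llm = ChatGroq(api_key="...", model="llama-3.3-70b-versatile")
# Returns a GuardrailsLLMWrapper when guardrails are enabled, otherwise the bare LLM
safe_llm = create_guardrails_llm(llm, usecase="Basic Chatbot")
response = safe_llm.invoke("Hello!")
```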
### Safety Warnings

When content is filtered, users see friendly warnings:
- 🛡️ **Input Safety**: Input was modified for safety
- 🛡️ **Output Safety**: Response was filtered for safety

### Fallback Behavior

If validation fails:
- **Input**: Safe alternative message is used
- **Output**: Filtered safe response is provided
- **Errors**: Application continues without breaking

## Use Case Specific Guards

### Basic Chatbot
- Input safety validation
- Output quality control
- Basic content moderation

### Tool-Enabled Chatbots (MCP, Search)
- Enhanced content moderation
- Tool output validation
- Structured response validation

### AI News
- News content appropriateness
- Source validation
- Information accuracy checks

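The use-case routing is implemented in `ValidationService.validate_llm_output()`: in this version only the tool-enabled use cases are checked with the content-moderation guard, while everything else, including AI News, falls back to the output-quality guard. A condensed sketch of that branch:

```python
# Simplified from ValidationService.validate_llm_output in validation_service.py
def pick_output_guard(config, usecase: str):
    # Tool-enabled use cases get the stricter content-moderation guard;
    # all other use cases are checked with the output-quality guard.
    if usecase in ["MCP Chatbot", "Chatbot with Tool"]:
        return config.get_guard("content_moderation")
    return config.get_guard("output_quality")
```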
## Monitoring Integration

Guardrails works seamlessly with Langfuse monitoring:
- Validation events are logged
- Safety metrics are tracked
- Guard performance is monitored
- Compliance reports available

## Customization

### Adding Custom Guards

```python
from guardrails import Guard
from guardrails.validators import ValidLength

# Create custom guard
custom_guard = Guard().use(
    ValidLength(min=5, max=100),
    # Add more validators
)

# Add to configuration
guardrails_config.guards["custom"] = custom_guard
```

### Adjusting Thresholds

```python
# Modify sensitivity in guardrails_config.py
ToxicLanguage(threshold=0.8)   # Higher = less sensitive
SensitiveTopics(threshold=0.7) # Lower = more sensitive
```

## Safety Dashboard

### Status Indicators
- 🛡️ **Guardrails: ON/OFF** - Shows protection status
- **Active Guards**: Number of active validation rules
- **Safety Metrics**: Validation statistics

### Real-time Feedback
- Input validation warnings
- Output filtering notifications
- Safety compliance indicators

## Troubleshooting

### Guardrails Not Working

1. **Check Environment Variable**:
   ```bash
   echo $GUARDRAILS_ENABLED
   ```

2. **Verify Installation**:
   ```bash
   pip list | grep guardrails
   ```

3. **Check Logs**:
   - Look for Guardrails initialization messages
   - Check for validation warnings in the UI

### Performance Issues

1. **Disable Specific Guards**:
   ```python
   # In guardrails_config.py
   # Comment out heavy validators
   ```

2. **Adjust Thresholds**:
   ```python
   # Increase thresholds for better performance
   ToxicLanguage(threshold=0.9)
   ```

### False Positives

1. **Review Validation Rules**:
   - Check sensitive topics list
   - Adjust toxicity thresholds
   - Modify content filters

2. **Whitelist Content**:
   ```python
   # Add exceptions for specific use cases
   ```

## Security Benefits

### Input Protection
- Prevents prompt injection attacks
- Blocks malicious content
- Validates input formats

### Output Safety
- Ensures appropriate responses
- Prevents harmful content generation
- Maintains quality standards

### Compliance
- Meets safety guidelines
- Supports content policies
- Enables audit trails

## Performance Impact

### Minimal Overhead
- Lightweight validation
- Async processing where possible
- Fail-safe fallbacks

### Optimization
- Cached validation results
- Efficient rule processing
- Smart threshold management

## Resources

- [Guardrails AI Documentation](https://docs.guardrailsai.com/)
- [Guardrails Hub](https://hub.guardrailsai.com/)
- [GitHub Repository](https://github.com/guardrails-ai/guardrails)
- [Community Examples](https://github.com/guardrails-ai/guardrails/tree/main/examples)

## Support

For issues with Guardrails integration:
1. Check the troubleshooting section
2. Review environment configuration
3. Consult Guardrails documentation
4. Open an issue in the project repository
README.md
CHANGED
@@ -12,7 +12,4 @@ short_description: agenticai
 license: mit
 ---
 
-### END TO END AGENTIC AI APPLICATION
-
-- Add support for memory, tools and MCP in it
-- Add guardrail and lanfuse support
+### END TO END AGENTIC AI APPLICATION
docker-compose.app-only.yml
CHANGED
@@ -13,6 +13,7 @@ services:
 - GROQ_API_KEY=${GROQ_API_KEY}
 - TAVILY_API_KEY=${TAVILY_API_KEY}
 - STREAMLIT_ENV=production
+- GUARDRAILS_ENABLED=${GUARDRAILS_ENABLED:-true}
 volumes:
 - ./src:/app/src
 - ./app.py:/app/app.py
docker-compose.yml
CHANGED
@@ -55,6 +55,7 @@ services:
 - LANGFUSE_PUBLIC_KEY=${LANGFUSE_PUBLIC_KEY:-}
 - LANGFUSE_HOST=${LANGFUSE_HOST:-http://langfuse-server:3000}
 - STREAMLIT_ENV=${STREAMLIT_ENV:-development}
+- GUARDRAILS_ENABLED=${GUARDRAILS_ENABLED:-true}
 volumes:
 - ./src:/app/src
 - ./app.py:/app/app.py
requirements.txt
CHANGED
@@ -8,4 +8,6 @@ faiss-cpu==1.12.0
 streamlit==1.49.1
 langchain-mcp-adapters
 mcp
-langfuse
+langfuse
+guardrails-ai==0.5.10
+pydantic==2.5.0
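A quick way to confirm that the newly pinned packages resolved inside the container (this complements the `pip list | grep guardrails` check in GUARDRAILS_SETUP.md):

```python
from importlib.metadata import version

# Distribution names as pinned in requirements.txt
for pkg in ("guardrails-ai", "pydantic"):
    print(pkg, version(pkg))
```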
src/langgraphagenticai/guardrails/__init__.py
CHANGED
@@ -0,0 +1 @@
+# Guardrails AI Integration Module
src/langgraphagenticai/guardrails/guardrails_config.py
ADDED
@@ -0,0 +1,105 @@
"""
Guardrails Configuration and Management
Handles safety and validation for LLM inputs and outputs
"""
import os
from typing import Dict, Any, Optional, List
import streamlit as st
from guardrails import Guard
from guardrails.hub import ToxicLanguage, ProfanityFree, ReadingTime, SensitiveTopics
from guardrails.validators import ValidLength, ValidChoices
from pydantic import BaseModel, Field


class GuardrailsConfig:
    """Configuration for Guardrails AI safety measures"""

    def __init__(self):
        self.enabled = os.getenv("GUARDRAILS_ENABLED", "true").lower() == "true"
        self.guards = {}
        self._initialize_guards()

    def _initialize_guards(self):
        """Initialize different types of guards for various use cases"""
        try:
            if not self.enabled:
                return

            # Input validation guard
            self.guards["input_safety"] = self._create_input_safety_guard()

            # Output validation guard
            self.guards["output_quality"] = self._create_output_quality_guard()

            # Content moderation guard
            self.guards["content_moderation"] = self._create_content_moderation_guard()

            # Structured output guard
            self.guards["structured_output"] = self._create_structured_output_guard()

        except Exception as e:
            # Fail silently - guardrails should not break the app
            if os.getenv("STREAMLIT_ENV") != "production":
                st.warning(f"⚠️ Guardrails initialization failed: {e}")
            self.enabled = False

    def _create_input_safety_guard(self) -> Guard:
        """Create guard for input safety validation"""
        try:
            return Guard().use(
                ToxicLanguage(threshold=0.8, validation_method="sentence"),
                ProfanityFree(),
                ValidLength(min=1, max=2000)
            )
        except Exception:
            return None

    def _create_output_quality_guard(self) -> Guard:
        """Create guard for output quality validation"""
        try:
            return Guard().use(
                ValidLength(min=10, max=5000),
                ReadingTime(reading_time_range=(1, 300))  # 1-300 seconds reading time
            )
        except Exception:
            return None

    def _create_content_moderation_guard(self) -> Guard:
        """Create guard for content moderation"""
        try:
            sensitive_topics = [
                "violence", "hate_speech", "harassment",
                "illegal_activities", "self_harm"
            ]
            return Guard().use(
                SensitiveTopics(sensitive_topics=sensitive_topics, threshold=0.8),
                ToxicLanguage(threshold=0.7, validation_method="full")
            )
        except Exception:
            return None

    def _create_structured_output_guard(self) -> Guard:
        """Create guard for structured output validation"""
        try:
            class ChatResponse(BaseModel):
                content: str = Field(description="The main response content")
                confidence: float = Field(ge=0.0, le=1.0, description="Confidence score")
                safe: bool = Field(description="Whether the response is safe")

            return Guard.from_pydantic(ChatResponse)
        except Exception:
            return None

    def get_guard(self, guard_type: str) -> Optional[Guard]:
        """Get a specific guard by type"""
        if not self.enabled:
            return None
        return self.guards.get(guard_type)

    def is_enabled(self) -> bool:
        """Check if guardrails are enabled"""
        return self.enabled and len(self.guards) > 0


# Global instance
guardrails_config = GuardrailsConfig()
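The guards built here are ordinary `guardrails.Guard` objects, so they can also be exercised directly; the `ValidationService` added in the next file reads the `validation_passed` and `validated_output` fields of the returned outcome. A minimal sketch, assuming guardrails and the hub validators are installed:

```python
from src.langgraphagenticai.guardrails.guardrails_config import guardrails_config

guard = guardrails_config.get_guard("input_safety")
if guard is not None:
    outcome = guard.validate("Hello, can you summarize this article?")
    # The same fields ValidationService reads from the outcome
    print(outcome.validation_passed, outcome.validated_output)
```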
src/langgraphagenticai/guardrails/llm_wrapper.py
ADDED
@@ -0,0 +1,160 @@
"""
Guardrails LLM Wrapper
Wraps LLM calls with input/output validation
"""
from typing import Any, List, Optional, Dict
from langchain_core.language_models.base import BaseLanguageModel
from langchain_core.messages import BaseMessage, AIMessage, HumanMessage
from langchain_core.callbacks import CallbackManagerForLLMRun
from .validation_service import validation_service
import streamlit as st


class GuardrailsLLMWrapper(BaseLanguageModel):
    """
    Wrapper that adds Guardrails validation to any LangChain LLM
    """

    def __init__(self, llm: BaseLanguageModel, usecase: str = "general"):
        self.llm = llm
        self.usecase = usecase
        self.validation_service = validation_service

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Any:
        """Generate response with input/output validation"""
        try:
            # Extract user input for validation
            user_input = ""
            if messages:
                last_message = messages[-1]
                if isinstance(last_message, HumanMessage):
                    user_input = last_message.content

            # Validate input
            if user_input:
                is_valid, processed_input, error_msg = self.validation_service.validate_user_input(user_input)

                if not is_valid:
                    # Show warning to user
                    if error_msg:
                        st.warning(f"🛡️ Input Safety: {error_msg}")

                    # Return safe response
                    safe_response = "I cannot process this request due to safety guidelines. Please rephrase your question."
                    return self.llm._generate([HumanMessage(content=safe_response)], stop, run_manager, **kwargs)

                # Update message with processed input if it was modified
                if processed_input != user_input:
                    messages[-1] = HumanMessage(content=processed_input)

            # Call original LLM
            result = self.llm._generate(messages, stop, run_manager, **kwargs)

            # Validate output
            if result and hasattr(result, 'generations') and result.generations:
                for generation in result.generations:
                    if hasattr(generation, 'message') and hasattr(generation.message, 'content'):
                        output_content = generation.message.content

                        is_valid, processed_output, error_msg = self.validation_service.validate_llm_output(
                            output_content, self.usecase
                        )

                        if not is_valid:
                            # Show warning to user
                            if error_msg:
                                st.warning(f"🛡️ Output Safety: Response was filtered for safety")

                            # Replace with processed (safe) output
                            generation.message.content = processed_output

            return result

        except Exception as e:
            # If validation fails, fall back to original LLM
            return self.llm._generate(messages, stop, run_manager, **kwargs)

    def invoke(self, input_data, config=None, **kwargs):
        """Invoke with validation"""
        try:
            # Handle different input types
            if isinstance(input_data, str):
                # Validate string input
                is_valid, processed_input, error_msg = self.validation_service.validate_user_input(input_data)

                if not is_valid:
                    if error_msg:
                        st.warning(f"🛡️ Input Safety: {error_msg}")
                    return AIMessage(content="I cannot process this request due to safety guidelines. Please rephrase your question.")

                input_data = processed_input

            elif isinstance(input_data, list):
                # Validate list of messages
                for i, message in enumerate(input_data):
                    if isinstance(message, HumanMessage):
                        is_valid, processed_input, error_msg = self.validation_service.validate_user_input(message.content)

                        if not is_valid:
                            if error_msg:
                                st.warning(f"🛡️ Input Safety: {error_msg}")
                            return AIMessage(content="I cannot process this request due to safety guidelines. Please rephrase your question.")

                        if processed_input != message.content:
                            input_data[i] = HumanMessage(content=processed_input)

            # Call original LLM
            result = self.llm.invoke(input_data, config, **kwargs)

            # Validate output
            if isinstance(result, AIMessage) and result.content:
                is_valid, processed_output, error_msg = self.validation_service.validate_llm_output(
                    result.content, self.usecase
                )

                if not is_valid:
                    if error_msg:
                        st.warning(f"🛡️ Output Safety: Response was filtered for safety")
                    result.content = processed_output

            return result

        except Exception as e:
            # Fall back to original LLM
            return self.llm.invoke(input_data, config, **kwargs)

    def bind_tools(self, tools, **kwargs):
        """Bind tools and return wrapped LLM"""
        bound_llm = self.llm.bind_tools(tools, **kwargs)
        return GuardrailsLLMWrapper(bound_llm, self.usecase)

    def __getattr__(self, name):
        """Delegate other attributes to the wrapped LLM"""
        return getattr(self.llm, name)


def create_guardrails_llm(llm: BaseLanguageModel, usecase: str = "general") -> BaseLanguageModel:
    """
    Create a Guardrails-wrapped LLM

    Args:
        llm: The original LLM to wrap
        usecase: The use case for appropriate validation rules

    Returns:
        Wrapped LLM with Guardrails validation
    """
    try:
        if validation_service.config.is_enabled():
            return GuardrailsLLMWrapper(llm, usecase)
        else:
            return llm
    except Exception:
        # If wrapping fails, return original LLM
        return llm
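Because `__getattr__` delegates unknown attributes to the wrapped model and `bind_tools()` re-wraps its result, the wrapper is intended as a drop-in stand-in for the underlying chat model. A hedged usage sketch (the tool, API key, and model name are illustrative, not part of this commit):

```python
from langchain_core.tools import tool
from langchain_groq import ChatGroq
from src.langgraphagenticai.guardrails.llm_wrapper import create_guardrails_llm

@tool
def add(a: int, b: int) -> int:
    """Add two numbers."""
    return a + b

# API key and model name are placeholders
llm = create_guardrails_llm(ChatGroq(api_key="...", model="llama-3.3-70b-versatile"),
                            usecase="Chatbot with Tool")
# bind_tools re-wraps its result, so the tool-bound model keeps the same validation
llm_with_tools = llm.bind_tools([add])
result = llm_with_tools.invoke("What is 2 + 3?")
```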
src/langgraphagenticai/guardrails/validation_service.py
ADDED
@@ -0,0 +1,140 @@
"""
Guardrails Validation Service
Provides validation methods for inputs and outputs
"""
import streamlit as st
from typing import Dict, Any, Optional, Tuple
from guardrails import ValidationOutcome
from .guardrails_config import guardrails_config


class ValidationService:
    """Service for validating inputs and outputs using Guardrails"""

    def __init__(self):
        self.config = guardrails_config

    def validate_user_input(self, user_input: str) -> Tuple[bool, str, Optional[str]]:
        """
        Validate user input for safety and appropriateness

        Returns:
            Tuple[bool, str, Optional[str]]: (is_valid, processed_input, error_message)
        """
        try:
            if not self.config.is_enabled():
                return True, user_input, None

            # Get input safety guard
            guard = self.config.get_guard("input_safety")
            if not guard:
                return True, user_input, None

            # Validate input
            result = guard.validate(user_input)

            if result.validation_passed:
                return True, result.validated_output, None
            else:
                # Extract error messages
                errors = []
                for failure in result.validation_failures:
                    errors.append(failure.error_message)

                error_msg = "Input validation failed: " + "; ".join(errors)
                return False, user_input, error_msg

        except Exception as e:
            # Fail silently - validation should not break the app
            return True, user_input, None

    def validate_llm_output(self, llm_output: str, usecase: str = "general") -> Tuple[bool, str, Optional[str]]:
        """
        Validate LLM output for quality and safety

        Returns:
            Tuple[bool, str, Optional[str]]: (is_valid, processed_output, error_message)
        """
        try:
            if not self.config.is_enabled():
                return True, llm_output, None

            # Get appropriate guard based on use case
            if usecase in ["MCP Chatbot", "Chatbot with Tool"]:
                guard = self.config.get_guard("content_moderation")
            else:
                guard = self.config.get_guard("output_quality")

            if not guard:
                return True, llm_output, None

            # Validate output
            result = guard.validate(llm_output)

            if result.validation_passed:
                return True, result.validated_output, None
            else:
                # Extract error messages
                errors = []
                for failure in result.validation_failures:
                    errors.append(failure.error_message)

                error_msg = "Output validation failed: " + "; ".join(errors)

                # Return a safe fallback message
                safe_output = "I apologize, but I cannot provide a response that meets safety guidelines. Please try rephrasing your question."
                return False, safe_output, error_msg

        except Exception as e:
            # Fail silently - validation should not break the app
            return True, llm_output, None

    def moderate_content(self, content: str) -> Tuple[bool, Optional[str]]:
        """
        Moderate content for sensitive topics and harmful content

        Returns:
            Tuple[bool, Optional[str]]: (is_safe, warning_message)
        """
        try:
            if not self.config.is_enabled():
                return True, None

            guard = self.config.get_guard("content_moderation")
            if not guard:
                return True, None

            result = guard.validate(content)

            if result.validation_passed:
                return True, None
            else:
                # Extract warning messages
                warnings = []
                for failure in result.validation_failures:
                    warnings.append(failure.error_message)

                warning_msg = "Content moderation warning: " + "; ".join(warnings)
                return False, warning_msg

        except Exception as e:
            # Fail silently
            return True, None

    def get_validation_stats(self) -> Dict[str, Any]:
        """Get validation statistics"""
        try:
            if not self.config.is_enabled():
                return {"enabled": False}

            return {
                "enabled": True,
                "guards_available": list(self.config.guards.keys()),
                "total_guards": len(self.config.guards)
            }
        except Exception:
            return {"enabled": False}


# Global instance
validation_service = ValidationService()
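Both validators return an `(is_valid, processed_text, error_message)` tuple and swallow their own exceptions, which is what lets the UI and the LLM wrapper treat validation as best-effort. A short sketch of calling the shared instance:

```python
from src.langgraphagenticai.guardrails.validation_service import validation_service

ok, text, err = validation_service.validate_user_input("Tell me about LangGraph agents")
if not ok:
    print("Blocked:", err)
else:
    print("Forwarding to the LLM:", text)

ok, output, err = validation_service.validate_llm_output(
    "Here is a short answer...", usecase="Basic Chatbot"
)
# When validation fails, `output` already holds the safe fallback message.
```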
src/langgraphagenticai/llms/groqllm.py
CHANGED
@@ -2,13 +2,14 @@ import os
 import streamlit as st
 from langchain_groq import ChatGroq
 from src.langgraphagenticai.monitoring.langfuse_integration import create_monitored_llm, get_langfuse_callbacks
+from src.langgraphagenticai.guardrails.llm_wrapper import create_guardrails_llm
 
 
 class GroqLLM:
     def __init__(self, user_controls_input):
         self.user_controls_input = user_controls_input
 
-    def get_llm_model(self):
+    def get_llm_model(self, usecase: str = "general"):
         try:
             groq_api_key = self.user_controls_input.get("GROQ_API_KEY")
             selected_groq_model = self.user_controls_input.get("selected_groq_model")
@@ -17,12 +18,19 @@ class GroqLLM:
                 st.error("Please enter the GROQ API key and select a model")
                 return None
 
-            # Create base LLM first
+            # Create base LLM first
             llm = ChatGroq(
                 api_key=groq_api_key,
                 model=selected_groq_model
             )
 
+            # Add Guardrails protection
+            try:
+                llm = create_guardrails_llm(llm, usecase)
+            except Exception:
+                # If Guardrails fails, continue without it
+                pass
+
             # Try to add monitoring, but don't fail if it doesn't work
             try:
                 callbacks = get_langfuse_callbacks()
@@ -33,7 +41,7 @@ class GroqLLM:
                 monitored_llm = create_monitored_llm(llm)
                 return monitored_llm
             except Exception:
-                # If monitoring fails, return the
+                # If monitoring fails, return the LLM (possibly with Guardrails)
                 return llm
         except Exception as e:
             st.error(f"Error initializing ChatGroq: {str(e)}")
src/langgraphagenticai/main.py
CHANGED
@@ -31,16 +31,18 @@ def load_langgraph_agenticai_app():
 
     if user_message:
         try:
-            #
+            # Get usecase first
+            usecase = user_input.get("selected_usecase", "general")
+
+            # Configure LLM with usecase for appropriate guardrails
             obj_llm_config = GroqLLM(user_controls_input=user_input)
-            model = obj_llm_config.get_llm_model()
+            model = obj_llm_config.get_llm_model(usecase=usecase)
 
             if not model:
                 st.error("Error: LLM model could not be initialized")
                 return
 
-            #
-            usecase = user_input.get("selected_usecase")
+            # Validate usecase
             if not usecase:
                 st.error("Error: no usecase selected")
                 return
src/langgraphagenticai/ui/streamlitui/display_result.py
CHANGED
@@ -81,9 +81,19 @@ class DisplayResultStremlit:
         except Exception:
             pass
 
-        # Show
+        # Show safety and monitoring info
         try:
             show_cost_tracking()
+            self._show_safety_info()
         except Exception:
             pass
+
+    def _show_safety_info(self):
+        """Show safety information"""
+        try:
+            from src.langgraphagenticai.guardrails.validation_service import validation_service
+            if validation_service.config.is_enabled():
+                st.info("🛡️ This conversation is protected by Guardrails AI for safety and quality.")
+        except Exception:
+            pass
 
src/langgraphagenticai/ui/streamlitui/loadui.py
CHANGED
@@ -42,15 +42,31 @@ class LoadStreamlitUI:
 
 
         with st.sidebar:
+            # Safety and Monitoring Status
+            st.subheader("🛡️ Safety & Monitoring")
+
+            # Guardrails status
+            try:
+                from src.langgraphagenticai.guardrails.validation_service import validation_service
+                if validation_service.config.is_enabled():
+                    st.success("🛡️ Guardrails: ON")
+                    stats = validation_service.get_validation_stats()
+                    if stats.get("total_guards", 0) > 0:
+                        st.caption(f"Active guards: {stats['total_guards']}")
+                else:
+                    st.info("🛡️ Guardrails: OFF")
+            except Exception:
+                pass
+
             # Langfuse monitoring status - fail silently if monitoring unavailable
             try:
                 from src.langgraphagenticai.monitoring.langfuse_integration import langfuse_manager
                 if langfuse_manager.is_enabled():
-                    st.success("π
+                    st.success("📊 Monitoring: ON")
                     dashboard_url = langfuse_manager.get_dashboard_url()
                     st.markdown(f"[📊 View Dashboard]({dashboard_url})")
                 else:
-                    st.info("π
+                    st.info("📊 Monitoring: OFF")
             except Exception:
                 # If monitoring status can't be determined, don't show anything
                 pass