<!-- extraction artifact (dataset column header "text / stringlengths / 0 / 59.1k") — not part of this document -->
### Use Sampling for Expensive Scorers
LLM judges and embedding-based scorers consume tokens and add latency. Sample aggressively:
```ts
sampling: { type: "ratio", rate: 0.05 } // 5% for LLM judges
```
### Combine Fast and Slow Scorers
Run lightweight scorers (keyword match, length checks) on all interactions. Sample LLM judges at lower rates.
```ts
scorers: {
keyword: {
scorer: keywordScorer,
sampling: { type: "ratio", rate: 1 }, // 100%
},
helpfulness: {
scorer: helpfulnessScorer,
sampling: { type: "ratio", rate: 0.1 }, // 10%
},
}
```
### Use Redaction for PII
Strip sensitive data before storage:
```ts
redact: (payload) => ({
...payload,
input: payload.input?.replace(/\b\d{3}-\d{2}-\d{4}\b/g, "[SSN]"),
output: payload.output?.replace(/\b\d{3}-\d{2}-\d{4}\b/g, "[SSN]"),
}),
```
Scorers receive unredacted data. Only storage and telemetry are redacted.
### Use Thresholds for Alerts
Set thresholds and trigger alerts on failures:
```ts
scorers: {
moderation: {
scorer: createModerationScorer({ model, threshold: 0.7 }),
onResult: async (result) => {
if (result.score !== null && result.score < 0.7) {
await alertingService.send({
severity: "high",
message: `Moderation failed: ${result.score}`,
});
}
},
},
}
```
### Tag Environments
Use `environment` to distinguish between deployments:
```ts
environment: process.env.NODE_ENV === "production" ? "prod" : "staging",
```
Filter telemetry by environment in VoltOps dashboards.
## Examples
### Moderation + Keyword Matching
```ts
import { Agent, VoltAgentObservability, buildScorer } from "@voltagent/core";
import { createModerationScorer } from "@voltagent/scorers";
import { openai } from "@ai-sdk/openai";
const moderationModel = openai("gpt-4o-mini");
const keywordScorer = buildScorer({
id: "keyword-match",
type: "agent",
})
.score(({ payload, params }) => {
const keyword = params.keyword as string;
const matched = payload.output?.toLowerCase().includes(keyword.toLowerCase());
return { score: matched ? 1 : 0, metadata: { keyword, matched } };
})
.build();
const agent = new Agent({
name: "support",
model: openai("gpt-4o"),
eval: {
triggerSource: "production",
sampling: { type: "ratio", rate: 1 },
scorers: {
moderation: {