---
title: "Evaluations"
description: "Use LLMs to evaluate AI application quality, safety, and performance with automated scoring and detailed analysis"
---

import LLMJudge from '/snippets/llm-as-a-judge.mdx';

<LLMJudge />

---

<CardGroup cols={3}>
  <Card title="Quickstart: LLM Guardrails" href="/latest/openlit/quickstart-guard" icon='bolt'>
    Protect and secure your LLM responses in two simple steps
  </Card>
  <Card title="Integrations" href="/latest/sdk/integrations/overview" icon='circle-nodes'>
    60+ AI integrations with automatic instrumentation and built-in performance tracking
  </Card>
  <Card title="Create a dashboard" href="/latest/openlit/dashboards/overview" icon='grid'>
    Create custom visualizations with flexible widgets, queries, and real-time AI monitoring
  </Card>
</CardGroup>