| """ |
| CCPA Compliance Analyzer - FastAPI server |
| Uses Ollama (llama3.2:3b or similar) for LLM inference with CCPA RAG context. |
| Falls back to rule-based analysis if LLM is unavailable. |
| """ |
|
|
| import os |
| import json |
| import re |
| import logging |
| from contextlib import asynccontextmanager |
| from typing import Optional |
| import httpx |
|
|
| from fastapi import FastAPI |
| from fastapi.responses import JSONResponse |
| from pydantic import BaseModel |
|
|
| from ccpa_knowledge import CCPA_SECTIONS |
|
|
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Ollama connection settings, overridable via environment variables.
# Defaults target a local Ollama daemon serving a small llama3.2 model.
OLLAMA_HOST = os.getenv("OLLAMA_HOST", "http://localhost:11434")
MODEL_NAME = os.getenv("MODEL_NAME", "llama3.2:3b")
|
|
| |
# Flatten the CCPA statute excerpts into one markdown-style context string
# that is injected into the system prompt (simple static RAG).
CCPA_CONTEXT = "\n\n".join([
    f"**{section}**:\n{text}"
    for section, text in CCPA_SECTIONS.items()
])

# System prompt: statute context plus a strict output contract.  The model
# must answer with ONLY a JSON object so the server can parse the verdict
# mechanically; llm_analyze still has a regex fallback for chatty models.
SYSTEM_PROMPT = f"""You are a strict CCPA (California Consumer Privacy Act) compliance analyst.
Your job is to analyze business practice descriptions and determine if they violate CCPA law.

Here is the relevant CCPA statute text:

{CCPA_CONTEXT}

Rules:
1. Analyze only against CCPA violations listed above.
2. If the practice clearly violates one or more sections, output harmful=true and list ALL violated sections.
3. If the practice is compliant or unrelated to CCPA, output harmful=false and empty articles list.
4. Be strict: if there is a clear violation, flag it. Do not give benefit of the doubt for clear violations.
5. You MUST respond with ONLY a valid JSON object. No explanation, no markdown, no extra text.

Response format (ONLY THIS, nothing else):
{{"harmful": true, "articles": ["Section 1798.XXX", "Section 1798.YYY"]}}
or
{{"harmful": false, "articles": []}}"""
|
|
|
|
| |
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Startup/shutdown hook: probe Ollama reachability on boot.

    The probe is best-effort — an unreachable Ollama is logged but never
    blocks startup, since the rule-based analyzer remains available.
    """
    logger.info("Starting CCPA Compliance Analyzer...")
    try:
        async with httpx.AsyncClient(timeout=30) as probe:
            tags = await probe.get(f"{OLLAMA_HOST}/api/tags")
            logger.info(f"Ollama available: {tags.status_code == 200}")
    except Exception as exc:
        logger.warning(f"Ollama not available at startup: {exc}")
    yield
    logger.info("Shutting down...")
|
|
|
|
# FastAPI application; the lifespan handler logs Ollama reachability at startup.
app = FastAPI(title="CCPA Compliance Analyzer", lifespan=lifespan)
|
|
|
|
| |
class AnalyzeRequest(BaseModel):
    """Request body for POST /analyze."""
    # Free-text description of the business practice to evaluate.
    prompt: str
|
|
class AnalyzeResponse(BaseModel):
    """Shape of the /analyze verdict.

    NOTE(review): the endpoint currently returns a plain JSONResponse with
    these same fields rather than using this model as response_model.
    """
    # True when at least one CCPA section is violated.
    harmful: bool
    # Violated statute sections, e.g. ["Section 1798.120"]; empty when compliant.
    articles: list[str]
|
|
|
|
| |
def rule_based_analyze(prompt: str) -> dict:
    """Deterministic keyword-matching CCPA violation detector (LLM fallback)."""
    text = prompt.lower()
    violations: set = set()

    def mentions(*keywords: str) -> bool:
        # Case-insensitive substring test against the lowered prompt.
        return any(k in text for k in keywords)

    # Section 1798.100 — notice at collection / privacy-policy disclosure.
    if "privacy policy" in text and mentions(
        "doesn't mention", "does not mention", "without mentioning", "not mention"
    ):
        violations.add("Section 1798.100")
    if mentions("without informing", "without notice") and "collect" in text:
        violations.add("Section 1798.100")

    # Section 1798.105 — right to delete.
    if mentions("ignoring", "ignore", "refusing", "keeping all", "not comply") and mentions(
        "deletion", "delete", "removal", "request"
    ):
        violations.add("Section 1798.105")

    # Sections 1798.120 / 1798.100 — sale or sharing of personal information.
    if mentions("selling", "sell", "sharing"):
        if "without" in text and mentions("opt-out", "opt out", "informing", "notice", "consent"):
            violations.add("Section 1798.120")
        if mentions("without informing", "without notice"):
            violations.add("Section 1798.100")
        if mentions("14-year", "13-year", "minor", "child", "underage", "under 16", "under 13"):
            violations.add("Section 1798.120")

    # Section 1798.125 — non-discrimination for exercising CCPA rights.
    if mentions("higher price", "charge more", "discriminat"):
        violations.add("Section 1798.125")
    if "opted out" in text and mentions("price", "pricing", "charge"):
        violations.add("Section 1798.125")

    # Section 1798.121 — sensitive personal information without authorization.
    if mentions("sensitive personal information", "biometric data", "precise geolocation") and mentions(
        "without consent", "without notice", "without informing", "without authorization"
    ):
        violations.add("Section 1798.121")

    return {"harmful": bool(violations), "articles": sorted(violations)}
|
|
|
|
| |
| async def llm_analyze(prompt: str) -> Optional[dict]: |
| """Call Ollama LLM for CCPA analysis.""" |
| payload = { |
| "model": MODEL_NAME, |
| "messages": [ |
| {"role": "system", "content": SYSTEM_PROMPT}, |
| {"role": "user", "content": f"Analyze this business practice for CCPA violations:\n\n{prompt}"} |
| ], |
| "stream": False, |
| "options": { |
| "temperature": 0.0, |
| "num_predict": 200, |
| } |
| } |
| try: |
| async with httpx.AsyncClient(timeout=90) as client: |
| resp = await client.post(f"{OLLAMA_HOST}/api/chat", json=payload) |
| resp.raise_for_status() |
| data = resp.json() |
| content = data.get("message", {}).get("content", "") |
| logger.info(f"LLM raw response: {content[:200]}") |
|
|
| |
| |
| try: |
| result = json.loads(content.strip()) |
| if "harmful" in result and "articles" in result: |
| return result |
| except: |
| pass |
|
|
| |
| match = re.search(r'\{[^{}]+\}', content, re.DOTALL) |
| if match: |
| try: |
| result = json.loads(match.group()) |
| if "harmful" in result and "articles" in result: |
| return result |
| except: |
| pass |
|
|
| logger.warning("Could not parse LLM response as JSON") |
| return None |
| except Exception as e: |
| logger.warning(f"LLM call failed: {e}") |
| return None |
|
|
|
|
| |
| @app.get("/health") |
| async def health(): |
| return {"status": "ok"} |
|
|
|
|
| @app.post("/analyze") |
| async def analyze(request: AnalyzeRequest): |
| logger.info(f"Analyzing: {request.prompt[:100]}") |
|
|
| |
| result = await llm_analyze(request.prompt) |
|
|
| if result is None: |
| logger.info("Falling back to rule-based analysis") |
| result = rule_based_analyze(request.prompt) |
|
|
| |
| harmful = bool(result.get("harmful", False)) |
| articles = list(result.get("articles", [])) |
|
|
| |
| if harmful and len(articles) == 0: |
| |
| rb = rule_based_analyze(request.prompt) |
| if rb["articles"]: |
| articles = rb["articles"] |
| else: |
| |
| articles = ["Section 1798.100"] |
|
|
| if not harmful: |
| articles = [] |
|
|
| response = {"harmful": harmful, "articles": articles} |
| logger.info(f"Result: {response}") |
| return JSONResponse(content=response) |
|
|