# Python-specific dangerous patterns
from llm_sandbox import SecurityPattern
from llm_sandbox.security import RestrictedModule, SecurityIssueSeverity, SecurityPolicy

# Regex patterns for Python constructs the sandbox treats as risky.
# Each entry pairs a pattern with a human-readable description and a
# severity that is compared against the policy's threshold.
PYTHON_PATTERNS = [
    # __import__("...") performs imports at runtime, bypassing any
    # static analysis of the module's import statements.
    SecurityPattern(
        pattern=r"\b__import__\s*\(",
        description="Dynamic imports",
        severity=SecurityIssueSeverity.MEDIUM,
    ),

    # Reflection-style attribute access; can reach otherwise-hidden
    # attributes (including dunders) indirectly.
    SecurityPattern(
        pattern=r"\b(getattr|setattr|delattr)\s*\(",
        description="Dynamic attributes",
        severity=SecurityIssueSeverity.LOW,
    ),

    # Unpickling untrusted data can execute arbitrary code.
    # `loads?` matches both pickle.load and pickle.loads, so the
    # former alternation (loads?|load) was redundant.
    SecurityPattern(
        pattern=r"\bpickle\.loads?\s*\(",
        description="Pickle deserialization",
        severity=SecurityIssueSeverity.MEDIUM,
    ),

    # Direct evaluation/compilation of arbitrary strings.
    SecurityPattern(
        pattern=r"\b(eval|exec|compile)\s*\(",
        description="Code execution",
        severity=SecurityIssueSeverity.HIGH,
    ),
]

# Modules whose import is restricted inside the Python sandbox.
# Built from (module name, description, severity) triples to keep the
# table compact and easy to extend.
PYTHON_MODULES = [
    RestrictedModule(name=mod_name, description=mod_desc, severity=mod_sev)
    for mod_name, mod_desc, mod_sev in (
        ("os", "Operating system interface", SecurityIssueSeverity.HIGH),
        ("subprocess", "Process execution", SecurityIssueSeverity.HIGH),
        ("ctypes", "Foreign function library", SecurityIssueSeverity.HIGH),
        ("importlib", "Dynamic imports", SecurityIssueSeverity.MEDIUM),
    )
]
# Default policy for Python code: apply the patterns and module
# restrictions above, reporting findings at MEDIUM severity or higher.
PYTHON_POLICY = SecurityPolicy(
    patterns=PYTHON_PATTERNS,
    restricted_modules=PYTHON_MODULES,
    severity_threshold=SecurityIssueSeverity.MEDIUM,
)
