from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from llama_cpp import Llama

# Initialize FastAPI app
app = FastAPI()

# Load the Llama model (downloads the GGUF file from the Hugging Face Hub on first run)
try:
    llm = Llama.from_pretrained(
        repo_id="QuantFactory/Lily-Cybersecurity-7B-v0.2-GGUF",
        filename="Lily-Cybersecurity-7B-v0.2.Q3_K_S.gguf",
    )
except Exception as e:
    raise RuntimeError(f"Failed to load model: {e}") from e
# Define request model for log data
class LogRequest(BaseModel):
    log_data: str

# Define response model
class AnalysisResponse(BaseModel):
    analysis: str
# Define the route for security log analysis
# (the "/analyze" path is a placeholder name; adjust to match your deployment)
@app.post("/analyze", response_model=AnalysisResponse)
async def analyze_security_logs(request: LogRequest) -> AnalysisResponse:
    try:
        # Security-focused prompt
        prompt = (
            "Analyze the following network log data for any indicators of malicious activity, "
            "such as unusual IP addresses, unauthorized access attempts, data exfiltration, or anomalies. "
            "Provide details on potential threats, IPs involved, and suggest actions if any threats are detected.\n\n"
            f"{request.log_data}"
        )
        # Generate response from the model
        response = llm.create_chat_completion(
            messages=[
                {"role": "user", "content": prompt}
            ]
        )
        # Extract and return the analysis text
        analysis_text = response["choices"][0]["message"]["content"]
        return AnalysisResponse(analysis=analysis_text)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e)) from e
# To run the app, use: uvicorn app:app --reload
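# Example request (a sketch, assuming the server is running locally on port 8000 and
# the "/analyze" path used above; the sample log line is purely illustrative):
#
#   curl -X POST http://localhost:8000/analyze \
#     -H "Content-Type: application/json" \
#     -d '{"log_data": "Oct 10 12:00:01 sshd[1234]: Failed password for root from 203.0.113.45"}'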