File size: 2,313 Bytes
c035ad5
 
 
 
 
 
 
 
 
 
 
8d9c3ff
c035ad5
 
 
 
 
 
 
 
 
 
8d9c3ff
c035ad5
8d9c3ff
c035ad5
8d9c3ff
c035ad5
 
 
 
 
8d9c3ff
 
 
 
 
 
 
 
c035ad5
 
 
 
 
8d9c3ff
 
c035ad5
 
8d9c3ff
 
 
 
 
 
 
 
 
 
 
 
 
c035ad5
8d9c3ff
c035ad5
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
import gradio as gr
import requests
import os

# Model settings
MODEL_NAME = "Canstralian/pentest_ai"  # Hugging Face model repo queried through the Inference API
HF_API_TOKEN = os.getenv("HF_API_TOKEN")  # read from the environment; None if the variable is unset

# Function to query the Hugging Face model
def query_hf(prompt):
    """Send *prompt* to the Hugging Face Inference API and return the generated text.

    Args:
        prompt: Full text prompt for the model (system preamble + dialogue).

    Returns:
        The model's generated text, stripped of surrounding whitespace, or an
        ``"Error querying model: ..."`` string if the request or parsing fails.
        Errors are returned (not raised) so the chat UI never crashes.
    """
    headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
    payload = {"inputs": prompt, "parameters": {"max_new_tokens": 300, "return_full_text": False}}
    try:
        response = requests.post(
            f"https://api-inference.huggingface.co/models/{MODEL_NAME}",
            headers=headers,
            json=payload,
            timeout=60,  # without a timeout a stalled request would hang the UI forever
        )
        response.raise_for_status()  # Raise an error for bad responses
        data = response.json()
        # Handle different response formats; guard against an empty list or
        # non-dict elements before indexing / key access.
        if isinstance(data, list) and data and isinstance(data[0], dict) and "generated_text" in data[0]:
            return data[0]["generated_text"].strip()
        elif isinstance(data, dict) and "generated_text" in data:
            return data["generated_text"].strip()
        else:
            return str(data).strip()  # Fallback to string representation
    except Exception as e:
        # Broad catch is deliberate: surface any failure as chat text.
        return f"Error querying model: {str(e)}"

# Chat function for Gradio
def chat_fn(message, history):
    """Build a dialogue prompt from *history* + *message* and query the model.

    Args:
        message: The user's latest message.
        history: Prior turns. Accepts either pair dicts
            (``{"user": ..., "assistant": ...}``, this module's native format)
            or OpenAI-style message dicts (``{"role": ..., "content": ...}``,
            as stored by ``gr.Chatbot(type="messages")``).

    Returns:
        A pair dict ``{"user": message, "assistant": <model reply>}``.
    """
    # Initialize history if empty
    if not history:
        history = []

    # Create prompt with history
    prompt = "You are a cybersecurity expert specializing in penetration testing. Provide clear, ethical, and actionable steps.\n"
    for msg in history:
        if "user" in msg and "assistant" in msg:
            # Native pair format.
            prompt += f"User: {msg['user']}\nAssistant: {msg['assistant']}\n"
        elif msg.get("role") == "user":
            prompt += f"User: {msg['content']}\n"
        elif msg.get("role") == "assistant":
            prompt += f"Assistant: {msg['content']}\n"
        # Unknown entries are skipped rather than raising KeyError.
    prompt += f"User: {message}\nAssistant: "

    # Get response from the model
    response = query_hf(prompt)

    # Return user and assistant messages as dictionaries
    return {"user": message, "assistant": response}

# Create Gradio interface
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    msg = gr.Textbox(placeholder="Ask about pentesting (e.g., 'How do I scan with Nmap?')")
    clear = gr.Button("Clear Chat")

    def submit_message(message, chat_history):
        """Handle a submitted message and return (updated history, cleared textbox).

        ``gr.Chatbot(type="messages")`` stores ``{"role", "content"}`` dicts,
        while ``chat_fn`` consumes/produces ``{"user", "assistant"}`` pairs —
        convert between the two here so the component accepts the update.
        """
        chat_history = chat_history or []
        # Rebuild the pair-format history that chat_fn expects.
        pairs = []
        for m in chat_history:
            if m.get("role") == "user":
                pairs.append({"user": m.get("content", ""), "assistant": ""})
            elif m.get("role") == "assistant" and pairs:
                pairs[-1]["assistant"] = m.get("content", "")
        turn = chat_fn(message, pairs)
        # Append in the messages format the Chatbot component requires.
        chat_history.append({"role": "user", "content": turn["user"]})
        chat_history.append({"role": "assistant", "content": turn["assistant"]})
        return chat_history, ""

    msg.submit(submit_message, [msg, chatbot], [chatbot, msg])
    clear.click(lambda: [], None, chatbot)

# Launch the app with custom title and description
demo.launch()