FlashCode-Lab committed on
Commit bd8dde2 · verified · 1 Parent(s): 65f92ba

Update app.py

Files changed (1)
  1. app.py +54 -67
app.py CHANGED
@@ -2,90 +2,77 @@ import gradio as gr
  from huggingface_hub import InferenceClient
  import re
 
- # Initialization - a 72B-class model is recommended for stronger audit capability
- client = InferenceClient("Qwen/Qwen2.5-Coder-32B-Instruct")
-
- # --- [A] Core audit logic ---
- def run_security_scan(code):
-     if not code or len(code) < 5:
-         return "⚠️ Waiting for code input..."
-     findings = []
-     if re.search(r"exec\(|eval\(", code): findings.append("🔴 Injection risk found: eval/exec")
-     if re.search(r"select.*from.*where", code, re.I): findings.append("🟠 SQL risk found")
-     if re.search(r"api_key|password|secret", code, re.I): findings.append("🟡 Hard-coded secret found")
-     return " \n".join(findings) if findings else "✅ No basic vulnerabilities found"
-
- # --- [C] Text handling logic ---
- def process_file(file):
-     if file is None: return ""
-     try:
-         with open(file.name, "r", encoding="utf-8") as f:
-             return f"\n[File Context]:\n{f.read()[:800]}"
-     except:
-         return ""
-
- # --- Core interaction engine ---
- def ghost_chat(message, history, system_message, file_obj):
-     file_context = process_file(file_obj)
+ # Fallback model list for extra stability
+ MODELS = {
+     "Qwen 2.5 Coder (professional auditing)": "Qwen/Qwen2.5-Coder-32B-Instruct",
+     "Llama 3.1 (logical reasoning)": "meta-llama/Llama-3.1-70B-Instruct",
+     "Mistral (rapid response)": "mistralai/Mistral-7B-Instruct-v0.3"
+ }
+
+ def ghost_chat(message, history, system_message, model_name, file_obj):
+     # Select the model automatically, falling back to the default entry
+     repo_id = MODELS.get(model_name, MODELS["Qwen 2.5 Coder (professional auditing)"])
+     client = InferenceClient(repo_id)
+
+     # File handling
+     file_context = ""
+     if file_obj is not None:
+         try:
+             with open(file_obj.name, "r", encoding="utf-8") as f:
+                 file_context = f"\n[File Context]:\n{f.read()[:1000]}"
+         except Exception: pass
+
      messages = [{"role": "system", "content": f"{system_message}\n{file_context}"}]
-
      for user_msg, assistant_msg in history:
          messages.append({"role": "user", "content": user_msg})
          messages.append({"role": "assistant", "content": assistant_msg})
-
      messages.append({"role": "user", "content": message})
-
+
      response = ""
-     # Simulate connection
-     yield history + [[message, "📡 Connecting to node..."]]
+     yield history + [[message, "🛰️ Establishing satellite uplink..."]]
 
-     for msg_chunk in client.chat_completion(messages, stream=True, max_tokens=2048):
-         token = msg_chunk.choices[0].delta.content
-         if token:
-             response += token
-             yield history + [[message, response]]
+     try:
+         for msg_chunk in client.chat_completion(messages, stream=True, max_tokens=2048):
+             token = msg_chunk.choices[0].delta.content
+             if token:
+                 response += token
+                 yield history + [[message, response]]
+     except Exception as e:
+         yield history + [[message, f"❌ System crash: {str(e)}"]]
 
- # --- [B] Style definitions ---
- terminal_css = """
- .gradio-container { background: #020202 !important; color: #00ff41 !important; }
- #title-text { text-align: center; color: #00ff41; text-shadow: 0 0 5px #00ff41; }
- .message.user { border-left: 3px solid #00ff41 !important; }
+ # --- Minimal black-box style ---
+ style = """
+ .gradio-container { background: #000 !important; color: #0f0 !important; }
+ .message.user { border: 1px solid #0f0 !important; }
+ .tabs { border: none !important; }
+ #title-text { text-align: center; }
  """
 
- # --- Build the UI ---
- with gr.Blocks(fill_height=True) as demo:
-     gr.Markdown("# 💀 GHOST-PROTOCOL v7.0", elem_id="title-text")
-
+ with gr.Blocks(fill_height=True, css=style) as demo:
+     gr.Markdown("## 🌌 INTERSTELLAR CORE v8.0", elem_id="title-text")
+
      with gr.Row():
-         # Toolbar
-         with gr.Column(scale=1, min_width=250):
-             with gr.Accordion("🛡️ Auditor", open=True):
-                 code_box = gr.Textbox(placeholder="Paste code...", lines=4, label=None)
-                 scan_btn = gr.Button("RUN", variant="primary")
-                 scan_out = gr.Textbox(label="Result", interactive=False)
-                 scan_btn.click(run_security_scan, [code_box], scan_out)
-
-             with gr.Accordion("📂 Injector", open=True):
-                 file_up = gr.File(label=None)
-
-         # Interaction panel
+         # Left control panel
+         with gr.Column(scale=1):
+             with gr.Group():
+                 model_dd = gr.Dropdown(list(MODELS.keys()), value="Qwen 2.5 Coder (professional auditing)", label="Select Core")
+                 sys_msg = gr.Textbox(value="You are a top-tier AI assistant.", label="System Architecture")
+
+             with gr.Accordion("🛠️ Tactical Attachments", open=False):
+                 file_up = gr.File(label="Inject Knowledge Base")
+                 scan_input = gr.Textbox(label="Scanner Input")
+                 scan_btn = gr.Button("RUN ANALYSIS")
+                 scan_res = gr.Textbox(label="Analysis Result")
+
+         # Right main console
          with gr.Column(scale=3):
-             # Chatbot parameters fully trimmed; every show_... style argument removed
-             chatbot = gr.Chatbot(height=600)
+             chatbot = gr.Chatbot(height=650)
              with gr.Row():
-                 msg = gr.Textbox(placeholder="root@ghost:~$", scale=9, container=False)
-                 btn = gr.Button("EXE", scale=1)
-
-             with gr.Accordion("SYSTEM", open=False):
-                 sys_msg = gr.Textbox(value="You are a top red-team expert.", label=None)
+                 msg = gr.Textbox(placeholder="Enter command...", scale=8, container=False)
+                 send = gr.Button("EXE", scale=2)
 
-             # Bindings
-             msg.submit(ghost_chat, [msg, chatbot, sys_msg, file_up], [chatbot])
-             btn.click(ghost_chat, [msg, chatbot, sys_msg, file_up], [chatbot])
+     # Binding logic
+     msg.submit(ghost_chat, [msg, chatbot, sys_msg, model_dd, file_up], [chatbot])
+     send.click(ghost_chat, [msg, chatbot, sys_msg, model_dd, file_up], [chatbot])
 
- # --- Launch ---
  if __name__ == "__main__":
-     demo.launch(
-         css=terminal_css,
-         theme=gr.themes.Monochrome()
-     )
+     demo.launch()
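
The comment on the new MODELS dict calls it a fallback list for extra stability, but as committed the dict only feeds the dropdown: if the selected endpoint fails, the except branch just reports the error. A minimal sketch of how the same dict could drive automatic failover, assuming the MODELS dict from app.py above; chat_with_failover is a hypothetical helper, not part of this commit, and the chat_completion call mirrors the one in ghost_chat:

from huggingface_hub import InferenceClient

def chat_with_failover(messages, preferred, max_tokens=2048):
    # Hypothetical helper (not in the commit): try the preferred repo first,
    # then the remaining MODELS entries in declaration order.
    ordered = [MODELS[preferred]] + [m for k, m in MODELS.items() if k != preferred]
    last_error = None
    for repo_id in ordered:
        try:
            client = InferenceClient(repo_id)
            # Non-streaming call for brevity; the app itself streams chunks.
            return client.chat_completion(messages, max_tokens=max_tokens)
        except Exception as e:  # e.g. model busy, rate-limited, or unavailable
            last_error = e
    raise last_error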