spanofzero committed on
Commit
489ca1d
·
verified ·
1 Parent(s): ce44f5d

final dual

Browse files
Files changed (1) hide show
  1. app.py +126 -131
app.py CHANGED
@@ -1,166 +1,161 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  from array import array
4
- import os
5
- import re
6
- import time
7
 
8
- # Securely retrieve the token from your Space's secrets
9
  HF_TOKEN = os.getenv("HF_TOKEN")
 
 
10
 
11
- # Initialize BOTH engines with the exact same base model
12
- client_primary = InferenceClient("Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN)
13
- client_competitor = InferenceClient("Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN)
14
-
15
  class StateController:
16
- __slots__ = ("_state", "_metric", "_batch", "_reg", "_rendered", "_rom60", "_symbols")
17
-
18
  def __init__(self):
19
  self._state = array("B", [0]) * 121
20
- self._metric = 60
21
- self._batch = 10
22
- self._reg = {}
23
- self._rendered = self._build_render()
24
-
25
- self._rom60 = tuple(
26
- tuple((i * j) % 60 for j in range(60))
27
- for i in range(60)
28
- )
29
- self._symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"
30
-
31
- def _build_render(self) -> str:
32
- return "".join(
33
- " [NODE_120] " if i == 120 else ("<" if i % 10 == 0 else ".")
34
- for i in range(121)
35
- )
36
-
37
- def diagnostic(self) -> str:
38
- for i in range(51):
39
- self._state[i] = i % self._batch
40
- self._reg.clear()
41
- self._reg["STATUS"] = "RESOLVED"
42
- return (
43
- "Diagnostic sequence initiated.\n\n"
44
- "Grid initialized: 5 active blocks.\n\n"
45
- "Rendering 121-point array:\n"
46
- f"{self._rendered}\n\n"
47
- "Executing state resolution:\n"
48
- "System resolved. State array reset to zero."
49
- )
50
-
51
- def generate_receipt(self, a: int, b: int, c: int) -> str:
52
- rom_val = self._rom60[a % 60][b % 60]
53
- checksum_index = (rom_val ^ (c % 60)) % 60
54
- return f"0{self._symbols[checksum_index]}"
55
-
56
- def validate_receipt(self, receipt: str, a: int, b: int, c: int) -> str:
57
- expected_receipt = self.generate_receipt(a, b, c)
58
- if receipt == expected_receipt:
59
- self._state[120] = 1
60
- return f"[NODE_120: ACTIVE] Checksum Validated. Receipt {receipt} matches allocation ({a}, {b}, {c})."
61
- else:
62
- self._state[120] = 0
63
- return f"[NODE_120: INACTIVE] Verification Failed. Expected receipt {expected_receipt}, received {receipt}."
64
 
65
  controller = StateController()
66
 
67
- PRIMARY_SYSTEM_MSG = {"role": "system", "content": "You are a logic-focused inference engine. Utilize strict state-hold memory and parallel integer blocks."}
68
- COMPETITOR_SYSTEM_MSG = {"role": "system", "content": "You are a standard helpful AI assistant."}
69
-
70
- def generate_responses(user_message: str, primary_history: list, competitor_history: list):
71
- clean_message = user_message.strip()
72
- if not clean_message:
73
- yield primary_history, competitor_history, ""
74
- return
75
-
76
- # Update histories with new dictionary format for Gradio 6
77
- primary_history.append({"role": "user", "content": clean_message})
78
- primary_history.append({"role": "assistant", "content": ""})
79
- competitor_history.append({"role": "user", "content": clean_message})
80
- competitor_history.append({"role": "assistant", "content": ""})
81
- yield primary_history, competitor_history, ""
82
 
83
  start_time = time.perf_counter()
84
 
85
- # Hardware diagnostic intercept
86
- if clean_message.lower() == "run grid diagnostic":
87
- output = controller.diagnostic()
88
- elapsed_time = time.perf_counter() - start_time
89
- primary_history[-1]["content"] = f"{output}\n\n---\n*Telemetry: {elapsed_time:.4f}s | Source: Local Engine*"
90
- competitor_history[-1]["content"] = "Hardware diagnostics not supported by generic models."
91
- yield primary_history, competitor_history, ""
92
- return
93
-
94
- # Deterministic Checksum Intercept
95
- verify_match = re.search(r"verify receipt\s+([a-zA-Z0-9]{2})\s+for\s+(\d+),\s*(\d+),\s*(\d+)", clean_message, re.IGNORECASE)
96
- if verify_match:
97
- receipt, a, b, c = verify_match.group(1), int(verify_match.group(2)), int(verify_match.group(3)), int(verify_match.group(4))
98
- output = controller.validate_receipt(receipt, a, b, c)
99
- elapsed_time = time.perf_counter() - start_time
100
- primary_history[-1]["content"] = f"{output}\n\n---\n*Telemetry: {elapsed_time:.6f}s | Source: Local ROM Math*"
101
- competitor_history[-1]["content"] = "Deterministic verification not supported by standard LLMs."
102
- yield primary_history, competitor_history, ""
103
- return
104
-
105
- # STREAM 1: Primary Engine
106
- try:
107
- msgs = [PRIMARY_SYSTEM_MSG] + primary_history[:-1]
108
- primary_response = ""
109
- stream = client_primary.chat_completion(messages=msgs, max_tokens=1024, stream=True, temperature=0.15)
110
- for chunk in stream:
111
- primary_response += (chunk.choices[0].delta.content or "")
112
- primary_history[-1]["content"] = primary_response
113
- yield primary_history, competitor_history, ""
114
-
115
- primary_time = time.perf_counter() - start_time
116
- primary_history[-1]["content"] += f"\n\n---\n*Telemetry: {primary_time:.2f}s | Source: Augmented Kernel*"
117
- yield primary_history, competitor_history, ""
118
- except Exception as e:
119
- primary_history[-1]["content"] = f"Error: {str(e)}"
120
- yield primary_history, competitor_history, ""
121
 
122
- # STREAM 2: Competitor Engine
123
- competitor_start = time.perf_counter()
124
- competitor_history[-1]["content"] = "*Connecting to vanilla infrastructure...*"
125
- yield primary_history, competitor_history, ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
126
 
127
  try:
128
- msgs = [COMPETITOR_SYSTEM_MSG] + competitor_history[:-1]
129
- competitor_response = ""
130
- stream = client_competitor.chat_completion(messages=msgs, max_tokens=1024, stream=True, temperature=0.7)
 
 
131
  for chunk in stream:
132
- competitor_response += (chunk.choices[0].delta.content or "")
133
- competitor_history[-1]["content"] = competitor_response
134
- yield primary_history, competitor_history, ""
135
-
136
- competitor_time = time.perf_counter() - competitor_start
137
- competitor_history[-1]["content"] += f"\n\n---\n*Telemetry: {competitor_time:.2f}s | Source: Vanilla Qwen*"
138
- yield primary_history, competitor_history, ""
139
  except Exception as e:
140
- competitor_history[-1]["content"] = f"Error: {str(e)}"
141
- yield primary_history, competitor_history, ""
142
 
 
143
  custom_css = """
144
- body, .gradio-container { background-color: #110c08 !important; }
145
- footer {display: none !important}
146
- .message-row { gap: 10px !important; }
147
  """
148
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
149
  with gr.Blocks() as demo:
150
  gr.Markdown("# [ GLYPH.IO ]\n### Dual-Engine Hardware Benchmark")
151
 
152
- primary_chat = gr.Chatbot(label="Augmented Logic Kernel", height=320, type="messages")
 
153
 
154
  with gr.Row():
155
- msg_input = gr.Textbox(label="Message", placeholder="Enter logic task...", scale=8)
156
  submit_btn = gr.Button("Execute", scale=1, variant="primary")
157
-
158
- gr.Examples(examples=["Calculate the integer distribution for 120 units across 3 nodes.", "Run grid diagnostic"], inputs=msg_input)
159
 
160
- competitor_chat = gr.Chatbot(label="Vanilla Qwen 2.5", height=320, type="messages")
 
 
 
 
161
 
162
- msg_input.submit(generate_responses, [msg_input, primary_chat, competitor_chat], [primary_chat, competitor_chat, msg_input])
163
- submit_btn.click(generate_responses, [msg_input, primary_chat, competitor_chat], [primary_chat, competitor_chat, msg_input])
164
 
165
  if __name__ == "__main__":
166
  demo.queue().launch(theme=gr.themes.Soft(primary_hue="orange"), css=custom_css)
 
1
# Standard library — one import per line (PEP 8).
import os
import re
import time
from array import array
from functools import lru_cache

# Third-party
import gradio as gr
from huggingface_hub import InferenceClient

# 1. API Configuration - Locked to the stable 7B model
# Token comes from the Space's secret store; never hard-code credentials.
HF_TOKEN = os.getenv("HF_TOKEN")
MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
# A single shared client serves both the "augmented" and "vanilla" streams.
client = InferenceClient(MODEL_ID, token=HF_TOKEN)
11
 
12
+ # 2. T3 High-Speed Logic Kernel
 
 
 
13
class StateController:
    """Local "T3" logic kernel: deterministic answers computed without an LLM.

    Holds a 121-slot byte grid, a precomputed 60x60 modular-multiplication
    table, and a 60-symbol alphabet used to derive two-character "receipts".
    """

    __slots__ = ("_state", "_rom60", "_symbols", "_rendered", "_dist_cache")

    def __init__(self):
        # 121 unboxed bytes, all zero (array multiplication pre-sizes the buffer).
        self._state = array("B", [0]) * 121
        # ROM lookup: _rom60[i][j] == (i * j) % 60, computed once up front.
        self._rom60 = tuple(tuple((i * j) % 60 for j in range(60)) for i in range(60))
        # 60 symbols: 26 uppercase + 26 lowercase + digits 0-7.
        self._symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"
        # Static 121-point rendering; index 120 is the labelled terminal node.
        self._rendered = "".join(
            " [NODE_120] " if i == 120 else ("<" if i % 10 == 0 else ".")
            for i in range(121)
        )
        # Memo table for compute_distribution. A per-instance dict replaces
        # functools.lru_cache on a bound method, which would key the cache on
        # `self` and keep this instance alive for the cache's lifetime (B019).
        self._dist_cache = {}

    def compute_distribution(self, total, nodes) -> str:
        """Spread `total` units over `nodes` nodes as evenly as possible.

        The first `total % nodes` nodes each receive one extra unit.
        Successful results are memoized per (total, nodes) pair.
        Returns an error string (no exception) when `nodes` is not positive.
        """
        key = (total, nodes)
        cached = self._dist_cache.get(key)
        if cached is not None:
            return cached
        if nodes <= 0:
            return "Error: Node count must be positive."
        base, rem = divmod(total, nodes)
        res = f"T3 Logic Kernel resolved {total} units across {nodes} nodes:\n\n"
        for i in range(nodes):
            res += f"NODE_{i+1:02}: {base + (1 if i < rem else 0)} units\n"
        self._dist_cache[key] = res
        return res

    def get_glyphs(self) -> str:
        """Return the pre-rendered 121-point state-array diagnostic view."""
        return f"Rendering 121-point state array:\n\n{self._rendered}\n\nSystem State: RESOLVED"

    def generate_receipt(self, a, b, c) -> str:
        """Derive a 2-char receipt: '0' + symbol at ((a*b mod 60) XOR c) mod 60."""
        idx = (self._rom60[a % 60][b % 60] ^ (c % 60)) % 60
        return f"0{self._symbols[idx]}"

    def validate_receipt(self, receipt, a, b, c) -> str:
        """Recompute the receipt for (a, b, c) and compare against `receipt`."""
        expected = self.generate_receipt(a, b, c)
        if receipt == expected:
            return f" CHECKSUM VALID: Receipt {receipt} verified for allocation ({a}, {b}, {c})."
        return f"× CHECKSUM INVALID: Expected {expected}, received {receipt}."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
 
43
# Single shared kernel instance used by the local interceptors below.
controller = StateController()
44
 
45
def format_telemetry(seconds: float) -> str:
    """Render an elapsed duration with an auto-selected unit (\u03BCs, ms, or s)."""
    if seconds < 0.001:
        micros = seconds * 1_000_000
        return f"{micros:.2f} \u03BCs"
    if seconds < 1:
        millis = seconds * 1_000
        return f"{millis:.2f} ms"
    return f"{seconds:.2f} s"
48
+
49
# 3. Core Response Logic
def generate_responses(user_message, p_hist, c_hist):
    """Generator driving both chat panes for one user message.

    Yields (primary_history, competitor_history, textbox_value) tuples so
    Gradio can stream partial updates; the textbox is always cleared ("").
    Deterministic tasks are intercepted and answered by the local kernel;
    everything else (and always the competitor pane) streams from the cloud
    model.  Histories are lists of {"role", "content"} dicts mutated in place.
    """
    msg = user_message.strip()
    # Empty input: echo the histories unchanged (or [] if None) and stop.
    if not msg: yield p_hist or [], c_hist or [], ""; return

    # Normalize None histories, then append the user turn plus an empty
    # assistant placeholder that the streaming loops fill incrementally.
    p_hist, c_hist = p_hist or [], c_hist or []
    p_hist.append({"role": "user", "content": msg})
    p_hist.append({"role": "assistant", "content": ""})
    c_hist.append({"role": "user", "content": msg})
    c_hist.append({"role": "assistant", "content": ""})
    yield p_hist, c_hist, ""

    start_time = time.perf_counter()

    # --- LOCAL INTERCEPTORS ---
    # Three patterns decide whether the primary pane is answered locally:
    # "<N> units across <M> nodes", any diagnostic/grid keyword, or a
    # "verify receipt XX for a, b, c" request.
    dist_match = re.search(r"(\d+)\s+units\s+across\s+(\d+)\s+nodes", msg, re.IGNORECASE)
    diag_match = any(kw in msg.lower() for kw in ["diagnostic", "grid"])
    rcpt_match = re.search(r"verify receipt\s+([a-zA-Z0-9]{2})\s+for\s+(\d+),\s*(\d+),\s*(\d+)", msg, re.IGNORECASE)

    if dist_match or diag_match or rcpt_match:
        # Precedence: distribution, then receipt, then glyph diagnostic.
        if dist_match:
            res = controller.compute_distribution(int(dist_match.group(1)), int(dist_match.group(2)))
        elif rcpt_match:
            res = controller.validate_receipt(rcpt_match.group(1), int(rcpt_match.group(2)), int(rcpt_match.group(3)), int(rcpt_match.group(4)))
        else:
            res = controller.get_glyphs()

        elapsed = time.perf_counter() - start_time
        p_hist[-1]["content"] = f"{res}\n\n---\n*Telemetry: {format_telemetry(elapsed)} | Source: LOCAL T3 KERNEL*"
        yield p_hist, c_hist, ""
    else:
        # No interceptor matched: stream the primary answer from the cloud
        # model at low temperature; history (minus the placeholder) is the
        # conversation context.
        try:
            res_text = ""
            stream = client.chat_completion(
                messages=[{"role":"system","content":"T3 Augmented Logic Engine"}] + p_hist[:-1],
                max_tokens=512, stream=True, temperature=0.1
            )
            for chunk in stream:
                res_text += (chunk.choices[0].delta.content or "")
                p_hist[-1]["content"] = res_text
                yield p_hist, c_hist, ""
            p_hist[-1]["content"] += f"\n\n---\n*Telemetry: {format_telemetry(time.perf_counter()-start_time)} | Source: AUGMENTED CLOUD*"
            yield p_hist, c_hist, ""
        except Exception as e:
            # Surface the failure in the chat pane rather than crashing the app.
            p_hist[-1]["content"] = f"Primary Error: {str(e)}"
            yield p_hist, c_hist, ""

    # NOTE(review): the competitor stream below runs even for intercepted
    # messages — presumably intentional, so the vanilla model attempts the
    # same task for comparison; confirm this is the desired benchmark flow.
    comp_start = time.perf_counter()
    c_hist[-1]["content"] = "*Routing through standard infrastructure...*"
    yield p_hist, c_hist, ""

    try:
        # Competitor pane: same model, higher temperature, plain system prompt.
        res_text = ""
        stream = client.chat_completion(
            messages=[{"role":"system","content":"Vanilla AI"}] + c_hist[:-1],
            max_tokens=512, stream=True, temperature=0.7
        )
        for chunk in stream:
            res_text += (chunk.choices[0].delta.content or "")
            c_hist[-1]["content"] = res_text
            yield p_hist, c_hist, ""
        c_hist[-1]["content"] += f"\n\n---\n*Telemetry: {format_telemetry(time.perf_counter()-comp_start)} | Source: VANILLA CLOUD*"
        yield p_hist, c_hist, ""
    except Exception as e:
        c_hist[-1]["content"] = f"Competitor Error: {str(e)}"
        yield p_hist, c_hist, ""
115
 
116
# 4. Interface Build (With Scrollable Container & NO 'type' attributes)
# Dark theme overrides, hidden footer, and a capped-height scrollable
# container for the examples list (targeted by elem_id below).
custom_css = """
body, .gradio-container { background-color: #110c08 !important; color: #fb923c !important; }
footer { display: none !important; }
#scrollable-box { max-height: 160px; overflow-y: auto; border: 1px solid #333; padding: 5px; border-radius: 8px; margin-bottom: 10px; }
"""
122
 
123
# Canned prompts: a mix of locally-intercepted tasks (distribution math,
# grid diagnostics, receipt verification) and free-form questions that
# fall through to the cloud model.  One-element lists, as gr.Examples expects.
example_prompts = [
    ["Run grid diagnostic"],
    ["Calculate the integer distribution for 50000 units across 12 nodes."],
    ["Define P vs. NP. Then validate a 120-unit distribution across 3 nodes."],
    ["Execute a Tier-3 Distribution Audit for 8593 units across 14 nodes."],
    ["Verify receipt 0e for 60, 30, 30"],
    ["Distribute 1000000 units across 7 nodes."],
    ["Perform a hardware grid initialization and diagnostic check."],
    ["Allocate exactly 2048 units across 16 nodes for cluster balancing."],
    ["Explain the theory of relativity. Then process 999 units across 9 nodes."],
    ["Run a full system diagnostic on the logical array."],
    ["Load balance 123456789 units across 256 nodes."],
    ["Draft an email to the logistics team. Then route 400 units across 5 nodes."],
    ["Initialize grid memory matrix and verify logic gate alignment."],
    ["Evaluate node efficiency for 7777 units across 11 nodes."],
    ["Explain how standard AI struggles with deterministic mathematical verification."]
]
140
+
141
# UI layout: primary pane on top, input row, scrollable example suite,
# competitor pane below.  Both triggers (Enter and the button) call the
# same generator and clear the textbox via its third output.
with gr.Blocks() as demo:
    gr.Markdown("# [ GLYPH.IO ]\n### Dual-Engine Hardware Benchmark")

    # 100% clean Chatbots with NO 'type="messages"' argument to prevent crashes
    p_chat = gr.Chatbot(label="Augmented Logic Kernel (T3 Architecture)", height=350)

    with gr.Row():
        msg_in = gr.Textbox(label="Message", placeholder="Test P vs NP or Logistics Distribution...", scale=8)
        submit_btn = gr.Button("Execute", scale=1, variant="primary")

    # elem_id matches the #scrollable-box rule in custom_css.
    with gr.Column(elem_id="scrollable-box"):
        gr.Examples(examples=example_prompts, inputs=msg_in, label="Diagnostic Test Suite (Scroll for more)")

    # 100% clean Chatbots with NO 'type="messages"' argument to prevent crashes
    c_chat = gr.Chatbot(label="Vanilla Qwen 2.5 (Standard Infrastructure)", height=350)

    msg_in.submit(generate_responses, [msg_in, p_chat, c_chat], [p_chat, c_chat, msg_in])
    submit_btn.click(generate_responses, [msg_in, p_chat, c_chat], [p_chat, c_chat, msg_in])

if __name__ == "__main__":
    demo.queue().launch(theme=gr.themes.Soft(primary_hue="orange"), css=custom_css)