# Hugging Face Space: Glyph.io Logic Interface (Gradio app).
# (Scraped page header reported status "Runtime error".)
| import gradio as gr | |
| from huggingface_hub import InferenceClient | |
| from array import array | |
| import os | |
| import re | |
| import time | |
# Securely retrieve the token from Space secrets.
# NOTE(review): os.getenv returns None when the HF_TOKEN secret is unset —
# the client is still constructed, but inference calls will fail with an
# authentication error at request time.
HF_TOKEN = os.getenv("HF_TOKEN")

# Initialize the inference client against the hosted Qwen instruct model.
client = InferenceClient("Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN)
class StateController:
    """Deterministic local engine: a 121-node state grid plus a base-60
    checksum ROM used to answer diagnostic/verification queries without
    touching the inference API."""

    __slots__ = ("_state", "_metric", "_batch", "_reg", "_rendered", "_rom60", "_symbols")

    def __init__(self):
        # 121 unsigned bytes, all zero. Index 120 is the verification node.
        self._state = array("B", bytes(121))
        self._metric = 60
        self._batch = 10
        self._reg = {}
        self._rendered = self._build_render()
        # 60x60 multiplication-mod-60 lookup ROM for O(1) checksum math.
        self._rom60 = tuple(
            tuple(row * col % 60 for col in range(60)) for row in range(60)
        )
        # 60-symbol alphabet used to render a checksum index as a character.
        # Case matters: 'B' is index 1 while 'b' is index 27.
        self._symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567"

    def _build_render(self) -> str:
        """Build the fixed 121-point visualization string once at init."""
        cells = []
        for idx in range(121):
            if idx == 120:
                cells.append(" [NODE_120] ")
            elif idx % 10 == 0:
                cells.append("<")
            else:
                cells.append(".")
        return "".join(cells)

    def diagnostic(self) -> str:
        """Run the diagnostic sequence and return a formatted status report.

        Side effects: writes a repeating 0..9 pattern into the first 51 state
        slots and resets the register map to {"STATUS": "RESOLVED"}.
        """
        for slot in range(51):
            self._state[slot] = slot % self._batch
        self._reg.clear()
        self._reg["STATUS"] = "RESOLVED"
        report = (
            "Diagnostic sequence initiated.\n\n"
            "Grid initialized: 5 active blocks.\n\n"
            "Rendering 121-point array:\n"
            f"{self._rendered}\n\n"
            "Executing state resolution:\n"
            "System resolved. State array reset to zero."
        )
        return report

    def generate_receipt(self, a: int, b: int, c: int) -> str:
        """Return the two-character base-60 checksum for a 3-node allocation."""
        rom_value = self._rom60[a % 60][b % 60]
        index = (rom_value ^ (c % 60)) % 60
        return "0" + self._symbols[index]

    def validate_receipt(self, receipt: str, a: int, b: int, c: int) -> str:
        """Re-derive the checksum for (a, b, c) and compare it to *receipt*.

        Sets state node 120 to 1 on a match, 0 otherwise, and returns a
        human-readable verdict string. Comparison is case-sensitive.
        """
        expected_receipt = self.generate_receipt(a, b, c)
        if receipt != expected_receipt:
            self._state[120] = 0  # Deactivate NODE_120
            return f"[NODE_120: INACTIVE] Verification Failed. Expected receipt {expected_receipt}, received {receipt}."
        self._state[120] = 1  # Activate NODE_120
        return f"[NODE_120: ACTIVE] Checksum Validated. Receipt {receipt} matches allocation ({a}, {b}, {c})."
# Global singleton instance for resource reuse: every request shares one
# state grid, one precomputed 60x60 ROM, and one pre-rendered visualization.
controller = StateController()
# System prompt prepended to every inference request sent to the model.
SYSTEM_MSG = {
    "role": "system",
    "content": (
        "You are a logic-focused inference engine. "
        "Utilize strict state-hold memory and parallel integer blocks. "
        "Provide direct, technical, and accurate responses."
    ),
}
def generate_response(message: str, history: list):
    """Stream a reply for *message* (Gradio generator protocol).

    Two deterministic intercepts are answered entirely locally — the grid
    diagnostic and receipt verification — everything else is forwarded to the
    Inference API. Yields progressively longer strings; the final yield
    carries a telemetry footer.
    """
    start_time = time.perf_counter()
    msg_lower = message.lower().strip()

    # Hardware diagnostic intercept — answered by the local engine.
    if msg_lower == "run grid diagnostic":
        output = controller.diagnostic()
        elapsed_time = time.perf_counter() - start_time
        yield f"{output}\n\n---\n*Telemetry: Compute Time {elapsed_time:.4f}s | Source: Local Engine*"
        return

    # Deterministic Checksum Intercept (Bypasses AI completely).
    # BUG FIX: match the ORIGINAL message, not msg_lower. The receipt alphabet
    # is case-sensitive ('B' and 'b' map to different checksum indices), so
    # lowercasing the input corrupted any receipt containing an uppercase
    # letter before validate_receipt ever saw it. re.IGNORECASE keeps the
    # command words ("verify receipt ... for") case-insensitive as before.
    verify_match = re.match(
        r"verify receipt\s+([a-zA-Z0-9]{2})\s+for\s+(\d+),\s*(\d+),\s*(\d+)",
        message.strip(),
        re.IGNORECASE,
    )
    if verify_match:
        receipt = verify_match.group(1)
        a = int(verify_match.group(2))
        b = int(verify_match.group(3))
        c = int(verify_match.group(4))
        output = controller.validate_receipt(receipt, a, b, c)
        elapsed_time = time.perf_counter() - start_time
        yield f"{output}\n\n---\n*Telemetry: Compute Time {elapsed_time:.6f}s | Source: Local ROM Math*"
        return

    # Build the message list for inference.
    messages = [SYSTEM_MSG]
    for turn in history:
        # Robustness: Gradio delivers history either as (user, assistant)
        # pairs or as OpenAI-style {"role", "content"} dicts depending on the
        # ChatInterface `type` setting — accept both shapes.
        if isinstance(turn, dict):
            messages.append({"role": turn["role"], "content": turn["content"]})
        else:
            user_text, assistant_text = turn
            messages.append({"role": "user", "content": user_text})
            messages.append({"role": "assistant", "content": assistant_text})
    messages.append({"role": "user", "content": message})

    try:
        # Stream text generation from the hosted model.
        stream = client.chat_completion(
            messages,
            max_tokens=1024,
            stream=True,
            temperature=0.15,
        )
        partial_response = ""
        for chunk in stream:
            token = chunk.choices[0].delta.content or ""
            partial_response += token
            yield partial_response

        # Post-processing telemetry after the stream completes.
        elapsed_time = time.perf_counter() - start_time
        word_count = len(partial_response.split())
        est_speed = word_count / elapsed_time if elapsed_time > 0 else 0

        # Append telemetry footer to the final response.
        final_output = partial_response + f"\n\n---\n*Telemetry: Compute Time {elapsed_time:.2f}s | Est. Speed: {est_speed:.2f} words/sec | Source: Inference API*"
        yield final_output
    except Exception as exc:  # boundary: surface any API failure in the UI
        yield f"System Error: {str(exc)}. Verify API token and permissions."
# Custom CSS for the page (intended to be injected via gr.Blocks(css=...)):
# dark page background, hidden Gradio footer, blue-accented chat bubbles.
custom_css = """
body, .gradio-container { background-color: #0b0f19 !important; }
footer {display: none !important}
.message.user { background-color: #1e293b !important; border: 1px solid #3b82f6 !important; }
.message.bot { background-color: #0f172a !important; color: #60a5fa !important; }
"""
# Build the UI.
# BUG FIX: `theme` and `css` are constructor arguments of gr.Blocks(), not of
# launch(). Passing them to launch() raises TypeError at startup — the likely
# cause of this Space's "Runtime error" status — and the custom theme/CSS were
# never applied. They now go on gr.Blocks().
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue"), css=custom_css) as demo:
    gr.Markdown("# Glyph.io Logic Interface")
    gr.ChatInterface(
        fn=generate_response,
        description="Inference layer utilizing state-hold logic and deterministic ROM verification.",
        examples=[
            "Run grid diagnostic",
            "Calculate the integer distribution for 120 units across 3 nodes.",
            "Verify receipt 0e for 60, 30, 30",
        ],
        cache_examples=False,
    )

if __name__ == "__main__":
    demo.queue().launch(ssr_mode=False)