import gradio as gr
import json
import logging
import argparse
import sys
import os
import math
import pickle
from deep_translator import GoogleTranslator
from gematria import calculate_gematria
from collections import defaultdict
from typing import Dict, List, Any, Optional
# --- Configuration ---
# Logging is kept for file-based or production logging, but we'll use print() for immediate console debug
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
BOOK_RANGE = range(1, 40)
CACHE_FILE = "tanakh_phrasedict.cache"
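# Assumed structure of the cached phrase dictionary (inferred from how matches are
# used below, not documented here): a dict mapping an integer Gematria value to a
# list of phrase records, e.g. {26: [{"text": "<phrase>", "words": 2, "freq": 14}, ...]}.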
# --- Core Logic Functions ---
def get_power_result(total_sum: int, query_value: int) -> int:
"""Calculates the power or root result."""
if query_value <= 1:
return 1
    if query_value < total_sum:
        try:
            exponent = int(math.floor(math.log(total_sum, query_value)))
            return query_value ** exponent
        except (ValueError, OverflowError):
            return 1
    # Fallback so the function never returns None; run_analysis only calls this
    # when query_value < verse_sum, so this path is not normally reached.
    return 1
def find_all_matching_phrases(target_sum: int, phrase_dictionary: Dict[int, List[Dict]]) -> List[Dict]:
"""Finds all phrases matching a target Gematria."""
return phrase_dictionary.get(int(target_sum), [])
# --- Global State: Load dictionary once at startup ---
try:
if not os.path.exists(CACHE_FILE):
raise FileNotFoundError(f"ERROR: Cache file '{CACHE_FILE}' not found. Please run 'build_indices.py' first.")
logging.info(f"Loading phrase dictionary from cache: {CACHE_FILE}")
with open(CACHE_FILE, 'rb') as f:
phrase_dictionary: Optional[Dict[int, List[Dict]]] = pickle.load(f)
logging.info("Phrase dictionary loaded successfully for the Gradio app.")
except (FileNotFoundError, IOError, pickle.UnpicklingError) as e:
logging.error(str(e))
phrase_dictionary = None
# --- Main Analysis Function for Gradio ---
def run_analysis(query: str, translate: bool, process_verses: int, results_per_verse: int, xor_depth: int, progress=gr.Progress(track_tqdm=True)):
"""The main analysis function called by the Gradio interface."""
if phrase_dictionary is None:
return "## Fatal Error\nCould not start analysis. The phrase dictionary cache file (`tanakh_phrasedict.cache`) is missing or corrupt. Please run `build_indices.py` and restart the app."
print("\n--- NEW ANALYSIS RUN ---") # Console Debug
output_lines = []
try:
query_value = calculate_gematria(query)
        if not query or query_value <= 1:
return f"## Error\nQuery '{query}' has an invalid Gematria value ({query_value}). Please enter a valid query."
except Exception as e:
return f"## Error\nCould not calculate Gematria for query '{query}'. Details: {e}"
progress(0, desc="Initializing...")
translator = None
if translate:
try:
translator = GoogleTranslator(source='iw', target='en')
except Exception as e:
logging.error(f"Could not initialize translator: {e}")
output_lines.append(f"**Warning:** Could not initialize translator: {e}")
output_lines.append(f"## XOR Gematria Resonance Analysis for: `{query}`")
verses_processed = 0
resonance_count = 0
# Using a generator to handle the nested loops cleanly and break out
def get_verses():
for book_num in BOOK_RANGE:
filepath = f"texts/torah/{book_num:02}.json"
if not os.path.exists(filepath): continue
with open(filepath, 'r', encoding='utf-8') as f:
data = json.load(f)
for chap_idx, chapter in enumerate(data.get("text", []), start=1):
for verse_idx, verse_text in enumerate(chapter, start=1):
yield (book_num, chap_idx, verse_idx, verse_text)
for book_num, chap_idx, verse_idx, verse_text in get_verses():
# Correctly handle the processing limit
if process_verses and verses_processed >= process_verses:
print(f"DEBUG: Processing limit of {process_verses} verses reached. Stopping analysis.")
break
verses_processed += 1
progress(verses_processed / process_verses, desc=f"Analyzing Verse {verses_processed}/{process_verses}")
verse_sum = calculate_gematria(verse_text)
if verse_sum <= 1: continue
        if query_value < verse_sum:
            power_result = get_power_result(verse_sum, query_value)
            main_target_sum = verse_sum ^ power_result
        elif query_value > verse_sum:
            main_target_sum = query_value ^ verse_sum
            power_result = 0
        else:  # query_value == verse_sum
            main_target_sum = verse_sum
            power_result = 0
main_matches = find_all_matching_phrases(main_target_sum, phrase_dictionary)
verse_ref = f"B{book_num:02d}, C{chap_idx}, V{verse_idx}"
        if power_result == 0:
            print(f"DEBUG: Analyzing [{verse_ref}] | Verse Sum: {verse_sum}, Main Target: {main_target_sum}")  # Console Debug
        else:
            print(f"DEBUG: Analyzing [{verse_ref}] | Verse Sum: {verse_sum}, Power/Root: {power_result}, Main Target: {main_target_sum}")  # Console Debug
if not main_matches:
print("DEBUG: No main resonance found. Skipping.") # Console Debug
continue
resonance_count += 1
print(f"DEBUG: Found Resonance #{resonance_count}!") # Console Debug
output_lines.append("\n---\n")
output_lines.append(f"### Resonance #{resonance_count} in [{verse_ref}]")
output_lines.append(f"> {verse_text.strip()}\n")
output_lines.append("```")
output_lines.append(f"Verse Sum (X) : {verse_sum} | Query: \"{query}\" (G: {query_value}) | Power/Root (Y): {power_result}")
output_lines.append("```\n")
def format_matches(matches: List[Dict], title: str, calculation_str: str):
if not matches: return
matches.sort(key=lambda p: (p.get('freq', 0) / p.get('words', 99)), reverse=True)
matches_to_show = matches[:results_per_verse]
output_lines.append(f"**{title}:** `{calculation_str}`")
for match in matches_to_show:
translation_str = ""
if translator:
try:
translation_str = translator.translate(match['text'])
except Exception:
translation_str = "[Translation failed]"
                score = (match.get('freq', 0) / match['words']) if match.get('words') else 0
gematria_val = calculate_gematria(match['text'])
output_lines.append(f" * **{match['text']}**")
output_lines.append(f" `G: {gematria_val}, Words: {match.get('words', 'N/A')}, Freq: {match.get('freq', 'N/A')}, Score: {score:.2f}`")
if translation_str:
output_lines.append(f"\n*Translation: \"{translation_str}\"*")
output_lines.append("")
        if power_result != 0:
            calc_str = f"[{verse_sum}] ^ [{power_result}] → [G_target:{main_target_sum}]"
        else:
            calc_str = f"[{verse_sum}] ^ [{query_value}] → [G_target:{main_target_sum}]"
format_matches(main_matches, "Main Resonance", calc_str)
if xor_depth > 0:
output_lines.append(f"**Bitplane Variations of the Result ({main_target_sum}):**")
for depth in range(xor_depth):
bit_flip = 1 << depth
target_sum = main_target_sum ^ bit_flip
bitplane_matches = find_all_matching_phrases(target_sum, phrase_dictionary)
if bitplane_matches:
                    bitplane_calc_str = f"[{main_target_sum}] ^ [Bit {depth+1}] → [G_target:{target_sum}]"
format_matches(bitplane_matches, f"Variation (Depth {depth + 1})", bitplane_calc_str)
if resonance_count == 0:
output_lines.append("\n**No resonances found. Consider increasing 'Verses to Process' or trying a different query.**")
print("--- ANALYSIS COMPLETE ---") # Console Debug
return "\n".join(output_lines)
# --- Gradio UI Definition ---
# Custom CSS for a professional dark theme inspired by the screenshot
custom_css = """
#output_markdown h3 {
color: #f97316; /* Vibrant orange for main resonance headers */
border-bottom: 2px solid #374151;
padding-bottom: 8px;
margin-top: 24px;
}
#output_markdown blockquote {
background-color: #1f2937;
border-left: 5px solid #f97316;
padding: 12px;
font-style: italic;
color: #d1d5db;
}
#output_markdown code {
background-color: #374151;
color: #e5e7eb;
padding: 3px 6px;
border-radius: 5px;
font-size: 0.9em;
}
"""
# Using the robust Default theme and customizing it for the desired dark look
dark_theme = gr.themes.Default(
primary_hue=gr.themes.colors.orange,
secondary_hue=gr.themes.colors.blue,
neutral_hue=gr.themes.colors.slate
).set(
body_background_fill="#0f172a",
background_fill_primary="#1e293b",
background_fill_secondary="#334155",
body_text_color="#e2e8f0",
color_accent_soft="#1e293b",
border_color_accent="#334155",
border_color_primary="#334155",
button_primary_background_fill="#f97316",
button_primary_text_color="#ffffff",
button_secondary_background_fill="#334155",
button_secondary_text_color="#e2e8f0",
)
with gr.Blocks(theme=dark_theme, css=custom_css, title="Tanakh XOR Gematria Resonance") as demo:
gr.Markdown("# ๐ Tanakh XOR Gematria Resonance")
with gr.Tabs():
with gr.TabItem("XOR Gematria Resonance"):
with gr.Row():
with gr.Column(scale=1):
query = gr.Textbox(
label="Query Phrase",
placeholder="e.g., ืืืื, ืืืืื, light...",
)
run_button = gr.Button("๐ฎ Divine Resonance", variant="primary")
with gr.Accordion("Advanced Parameters", open=False):
process_verses = gr.Slider(
label="Verses to Process", minimum=1, maximum=35000, step=1, value=10,
info="How many verses to analyze from the start of the Tanakh."
)
results_per_verse = gr.Slider(
label="Results per Resonance", minimum=1, maximum=10, step=1, value=1,
info="How many top phrases to show for each found resonance type."
)
xor_depth = gr.Slider(
label="Bitplane Variation Depth", minimum=0, maximum=16, step=1, value=2,
info="How many bit-levels of the main result to vary and analyze."
)
translate = gr.Checkbox(label="Translate to English", value=True)
gr.Examples(
examples=[
["ืืืื"], ["ืืืืื"], ["ืฉืืื ื"],
["ืืฉืื ืืฉืืขืื"], ["ืืื ืฉืืืฉืื ืืฉืืข"], ["ืฆืืง ืืืฉืคื"]
],
inputs=[query]
)
with gr.Column(scale=3):
output_markdown = gr.Markdown(label="Resonances", elem_id="output_markdown")
with gr.TabItem("About & Help"):
gr.Markdown(
"""
### How It Works
This tool explores the numerological and structural connections within the Tanakh based on Gematria and bitwise XOR operations. It is an instrument for textual exploration, not a historical or theological authority.
1. **Gematria Calculation:** The Gematria (numerical value) of your **Query Phrase** and each **Verse** in the Tanakh is calculated.
2. **Power/Root Operator (Y):** To create a non-obvious link, the Query's Gematria is transformed. If it is smaller than the Verse's Gematria, it is raised to the highest power that still fits below the Verse's Gematria; this becomes the "Operator" (Y). If it is larger, the Query's Gematria itself is used as the operator (see the numeric sketch after this list).
3. **Main Resonance:** The core operation is `Verse_Gematria (X) ^ Operator (Y)`. The result is a **Target Gematria**. The app then finds all phrases in the Tanakh with this exact numerical value. This is the "Main Resonance".
4. **Bitplane Variations:** To explore the "fractal neighborhood" of the Main Resonance, the app then "flips" each bit of the result, one by one. For each flipped bit (`depth`), it calculates a new Target Gematria (`Main_Result ^ 2^depth`) and finds corresponding phrases. This reveals concepts that are numerologically "close" to the main result.
5. **Scoring:** Results are sorted by a relevance score calculated as `Frequency / Word_Count` to prioritize short, common phrases.
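A minimal numeric sketch of steps 2-4, using illustrative values only (not actual Gematria from the text):
```python
verse_sum   = 2000                    # Gematria of a verse (X)
query_value = 3                       # Gematria of the query
operator    = query_value ** 6        # highest power below the verse sum: 3**6 = 729 (Y)
main_target = verse_sum ^ operator    # 2000 ^ 729 = 1289 -> phrases with Gematria 1289 resonate
variation_1 = main_target ^ (1 << 0)  # bitplane depth 1: 1289 ^ 1 = 1288
```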
### Parameters
- **Verses to Process:** Limits how many verses the script analyzes. Higher numbers take longer.
- **Results per Resonance:** Limits how many phrases are shown for the main resonance and each variation.
- **Bitplane Variation Depth:** Controls how many "bit-flips" are tested. A depth of 5 will test flipping Bit 1, Bit 2, Bit 3, Bit 4, and Bit 5.
"""
)
run_button.click(
fn=run_analysis,
inputs=[query, translate, process_verses, results_per_verse, xor_depth],
outputs=[output_markdown]
)
if __name__ == "__main__":
if phrase_dictionary is None:
print("CRITICAL: Phrase dictionary could not be loaded. The application cannot start.")
print("Please ensure 'tanakh_phrasedict.cache' exists and is valid. Run 'build_indices.py' if necessary.")
else:
        # Pass share=True to demo.launch() to create a public share link; by default the app is only accessible locally.
demo.launch()