"""
Indus Script β€” Interactive Demo
HuggingFace Space using models from hellosindh/indus-script-models
"""
# Fix for Python 3.13 β€” audioop removed, mock it before gradio imports pydub
import sys, types
if "audioop" not in sys.modules:
sys.modules["audioop"] = types.ModuleType("audioop")
import gradio as gr
import torch
import torch.nn as nn
import torch.nn.functional as F
import pickle
import json
import math
from pathlib import Path
from huggingface_hub import snapshot_download
# ── Load models once at startup ────────────────────────────────
MODEL_REPO = "hellosindh/indus-script-models"
LOCAL_DIR = Path("indus_models")
def setup():
    """Download the model snapshot on first run; return (models_dir, data_dir)."""
    if not LOCAL_DIR.exists() or not (LOCAL_DIR / "models" / "nanogpt_indus.pt").exists():
        print("Downloading models...")
        snapshot_download(repo_id=MODEL_REPO, local_dir=str(LOCAL_DIR))
    return LOCAL_DIR / "models", LOCAL_DIR / "data"
MODEL_DIR, DATA_DIR = setup()
sys.path.insert(0, str(LOCAL_DIR))
device = torch.device("cpu")
BOS_ID, EOS_ID, PAD_ID = 814, 815, 816  # special-token ids used by the NanoGPT generator
# Load glyph map
with open(DATA_DIR / "id_to_glyph.json", encoding="utf-8") as f:
GLYPH = json.load(f)
def G(tid):
    """Return the display glyph for a sign id, or '' if unmapped."""
    return GLYPH.get(str(tid), "")
# Load sign index if available
SIGN_INDEX = {}
sign_idx_path = DATA_DIR / "sign_index.json"
if sign_idx_path.exists():
with open(sign_idx_path, encoding="utf-8") as f:
data = json.load(f)
for s in data.get("signs", []):
SIGN_INDEX[s["mahadevan_id"]] = s
# ── Load all models ────────────────────────────────────────────
from transformers import (BertForMaskedLM, BertForSequenceClassification,
BertModel, BertConfig, PreTrainedTokenizerFast)
tok_path = DATA_DIR / "indus_tokenizer"
TOK = PreTrainedTokenizerFast.from_pretrained(str(tok_path))
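# Both BERT heads use this tokenizer: CLS scores whole sequences for
# validity (the "TinyBERT" column in the UI), MLM fills [MASK]ed signs.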
CLS = BertForSequenceClassification.from_pretrained(
str(MODEL_DIR / "cls")).to(device).eval()
MLM = BertForMaskedLM.from_pretrained(
str(MODEL_DIR / "mlm")).to(device).eval()
class ElectraDisc(nn.Module):
    """ELECTRA-style discriminator: a BERT encoder with a per-token
    two-class (original vs. replaced) head."""
    def __init__(self, cfg):
        super().__init__()
        self.bert = BertModel(cfg)
        self.classifier = nn.Linear(cfg.hidden_size, 2)
        self.dropout = nn.Dropout(0.1)

    def forward(self, input_ids, attention_mask):
        hidden = self.bert(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        return self.classifier(self.dropout(hidden))
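# Rebuild the discriminator from its saved config and weights; its
# tokenizer ships separately in the electra/ folder of the snapshot.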
with open(MODEL_DIR / "electra" / "discriminator_config.json") as f:
ecfg = json.load(f)
ELEC = ElectraDisc(BertConfig(**ecfg))
ELEC.load_state_dict(torch.load(MODEL_DIR / "electra" / "discriminator.pt",
map_location=device, weights_only=True))
ELEC = ELEC.to(device).eval()
ELEC_TOK = PreTrainedTokenizerFast.from_pretrained(str(MODEL_DIR / "electra"))
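# The pickled n-gram model references classes from the snapshot's
# indus_ngram.py, so import that module before unpickling.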
import importlib.util
spec = importlib.util.spec_from_file_location("indus_ngram", LOCAL_DIR / "indus_ngram.py")
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
with open(MODEL_DIR / "ngram_model.pkl", "rb") as f:
NGRAM = pickle.load(f)
# ── NanoGPT: small decoder-only transformer ───────────────────
class CSA(nn.Module):
    """Causal self-attention with a fixed lower-triangular mask."""
    def __init__(self, c):
        super().__init__()
        self.nh = c["n_head"]
        self.ne = c["n_embd"]
        self.hd = c["n_embd"] // c["n_head"]
        self.qkv = nn.Linear(c["n_embd"], 3 * c["n_embd"], bias=False)
        self.proj = nn.Linear(c["n_embd"], c["n_embd"], bias=False)
        self.drop = nn.Dropout(c["dropout"])
        ml = c["block_size"]
        self.register_buffer("mask", torch.tril(torch.ones(ml, ml)).view(1, 1, ml, ml))

    def forward(self, x):
        B, T, C = x.shape
        q, k, v = self.qkv(x).split(self.ne, dim=2)
        sh = lambda t: t.view(B, T, self.nh, self.hd).transpose(1, 2)  # (B, nh, T, hd)
        q, k, v = sh(q), sh(k), sh(v)
        a = (q @ k.transpose(-2, -1)) / math.sqrt(self.hd)
        a = a.masked_fill(self.mask[:, :, :T, :T] == 0, float("-inf"))
        out = (self.drop(F.softmax(a, dim=-1)) @ v).transpose(1, 2).contiguous().view(B, T, C)
        return self.proj(out)
class TB(nn.Module):
    """Pre-LayerNorm transformer block: causal attention plus a GELU feed-forward."""
    def __init__(self, c):
        super().__init__()
        self.ln1 = nn.LayerNorm(c["n_embd"])
        self.attn = CSA(c)
        self.ln2 = nn.LayerNorm(c["n_embd"])
        self.ffn = nn.Sequential(
            nn.Linear(c["n_embd"], 4 * c["n_embd"]), nn.GELU(),
            nn.Linear(4 * c["n_embd"], c["n_embd"]), nn.Dropout(c["dropout"]))

    def forward(self, x):
        # Note: the FFN residual adds the block input x, not the attention
        # output; left unchanged so inference matches the checkpoint's
        # training-time forward pass.
        return x + self.ffn(self.ln2(x + self.attn(self.ln1(x))))
class GPT(nn.Module):
    def __init__(self, c):
        super().__init__()
        self.cfg = c
        self.tok_emb = nn.Embedding(c["vocab_size"], c["n_embd"])
        self.pos_emb = nn.Embedding(c["block_size"], c["n_embd"])
        self.drop = nn.Dropout(c["dropout"])
        self.blocks = nn.ModuleList([TB(c) for _ in range(c["n_layer"])])
        self.ln_f = nn.LayerNorm(c["n_embd"])
        self.head = nn.Linear(c["n_embd"], c["vocab_size"], bias=False)
        self.tok_emb.weight = self.head.weight  # weight tying

    def forward(self, idx):
        B, T = idx.shape
        pos = torch.arange(T, device=idx.device).unsqueeze(0)
        x = self.drop(self.tok_emb(idx) + self.pos_emb(pos))
        for b in self.blocks:
            x = b(x)
        return self.head(self.ln_f(x))

    @torch.no_grad()
    def generate(self, temperature=0.85, top_k=40, max_len=10):
        self.eval()
        idx = torch.tensor([[BOS_ID]], device=device)
        gen = []
        for _ in range(max_len):
            logits = self(idx[:, -self.cfg["block_size"]:])[:, -1, :] / temperature
            # Mask all special tokens; since EOS is masked too, the break
            # below never fires and generation always runs to max_len.
            logits[:, PAD_ID] = logits[:, BOS_ID] = logits[:, EOS_ID] = float("-inf")
            if top_k > 0:
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = float("-inf")
            nxt = torch.multinomial(F.softmax(logits, dim=-1), 1)
            if nxt.item() == EOS_ID:
                break
            gen.append(nxt.item())
            idx = torch.cat([idx, nxt], dim=1)
        return list(reversed(gen))  # reversed for display (the script is read right-to-left)
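# Restore the trained NanoGPT; the checkpoint bundles the model config
# under "cfg" and the weights under "model_state".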
ckpt = torch.load(MODEL_DIR / "nanogpt_indus.pt", map_location=device, weights_only=False)
GPT_MODEL = GPT(ckpt["cfg"])
GPT_MODEL.load_state_dict(ckpt["model_state"])
GPT_MODEL = GPT_MODEL.to(device).eval()
print("All models loaded.")
# ── Scoring helpers ────────────────────────────────────────────
def parse_seq(text):
    """Parse 'T638 [MASK] t420' -> [638, None, 420]; unparseable tokens are skipped."""
    ids = []
    for t in text.strip().upper().split():
        if t == "[MASK]":
            ids.append(None)
        else:
            try:
                ids.append(int(t.lstrip("T")))
            except ValueError:
                continue
    return ids
def bert_score(seq):
    enc = TOK(" ".join(f"T{t}" for t in seq), return_tensors="pt",
              truncation=True, max_length=32).to(device)
    with torch.no_grad():
        # P(class 1) is read as the probability that the sequence is valid
        return float(torch.softmax(CLS(**enc).logits, dim=-1)[0][1])
def ngram_score(seq):
return NGRAM.validity_score(seq)
def electra_score(seq):
    enc = ELEC_TOK(" ".join(f"T{t}" for t in seq), return_tensors="pt",
                   truncation=True, max_length=32).to(device)
    with torch.no_grad():
        logits = ELEC(enc["input_ids"], enc["attention_mask"])
        probs = torch.softmax(logits[0], dim=-1)
        # Class 0 is read as "looks original"; average it over the sign
        # positions, skipping the [CLS] token at position 0.
        n = min(len(seq), probs.shape[0] - 1)
        return float(probs[1:n + 1, 0].mean())
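# Fixed-weight ensemble: 50% TinyBERT classifier, 25% RTL n-gram, 25% ELECTRA.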
def ensemble(seq):
b = bert_score(seq)
n = ngram_score(seq)
e = electra_score(seq)
return 0.50*b + 0.25*n + 0.25*e, b, n, e
def seq_to_glyphs(seq):
    """Concatenate glyphs for all non-mask ids (unmapped signs are dropped)."""
    return "".join(G(t) for t in seq if t is not None)

def format_glyphs(seq):
    """Space-separated glyphs; '[?]' for masks, 'T<id>' for unmapped signs."""
    parts = []
    for t in seq:
        if t is None:
            parts.append("[?]")
        else:
            g = G(t)
            parts.append(g if g else f"T{t}")
    return " ".join(parts)
# ── Tab 1: Sign Lookup ─────────────────────────────────────────
def sign_lookup(query):
    query = query.strip().upper().lstrip("T")
    try:
        tid = int(query)
    except ValueError:
        return "Enter a sign ID like **604** or **T604**"
glyph = G(tid)
info = SIGN_INDEX.get(tid, {})
role = info.get("role", "unknown")
count = info.get("corpus_count", 0)
freq = info.get("corpus_freq_pct", 0)
start_rate = info.get("start_rate_pct", 0)
end_rate = info.get("end_rate_pct", 0)
role_desc = {
"PREFIX" : "Appears at the RTL terminal position (reading end). Likely a title or determinative marker.",
"SUFFIX" : "Appears at the RTL initial position (reading start). Likely a closing or grammatical marker.",
"CORE" : "Appears in medial positions. Core content sign.",
"RARE" : "Appears rarely in corpus (≀50 times). Role uncertain.",
"UNSEEN" : "Not found in training corpus.",
}.get(role, "")
result = f"""## T{tid}
**Glyph:** {glyph if glyph else '(install Indus font to see glyph)'}
**Role:** {role}
{role_desc}
**Corpus statistics:**
- Appears **{count:,}** times ({freq:.3f}% of all tokens)
- At sequence start: **{start_rate:.2f}%** of inscriptions
- At sequence end: **{end_rate:.2f}%** of inscriptions
"""
return result
# ── Tab 2: Validate sequence ───────────────────────────────────
def validate_sequence(text):
seq = [t for t in parse_seq(text) if t is not None]
if len(seq) < 2:
return "Enter at least 2 signs, e.g. **T638 T177 T420**"
ens, b, n, e = ensemble(seq)
glyphs = format_glyphs(seq)
seq_str = " ".join(f"T{t}" for t in seq)
    if ens >= 0.85:
        verdict = "✅ VALID - strong grammatical structure"
        color = "green"
    elif ens >= 0.70:
        verdict = "⚠️ UNCERTAIN - some grammatical structure"
        color = "orange"
    else:
        verdict = "❌ INVALID - weak or no grammatical structure"
        color = "red"
# Check sign roles
roles = []
for t in seq:
info = SIGN_INDEX.get(t, {})
role = info.get("role", "?")
roles.append(f"T{t}={role}")
result = f"""## Result
**Sequence:** {seq_str}
**Glyphs:** {glyphs}
---
| Model | Score |
|---|---|
| TinyBERT | {b:.4f} |
| N-gram RTL | {n:.4f} |
| ELECTRA | {e:.4f} |
| **Ensemble** | **{ens:.4f}** |
**Verdict:** {verdict}
**Sign roles:** {', '.join(roles)}
"""
return result
# ── Tab 3: Predict masked sign ─────────────────────────────────
def predict_mask(text):
seq = parse_seq(text)
if None not in seq:
return "Include **[MASK]** in your sequence, e.g. **T638 [MASK] T420**"
if len(seq) < 2:
return "Enter at least 2 tokens including [MASK]"
parts = ["[MASK]" if t is None else f"T{t}" for t in seq]
enc = TOK(" ".join(parts), return_tensors="pt",
truncation=True, max_length=32).to(device)
with torch.no_grad():
logits = MLM(**enc).logits
results = []
    for pos, val in enumerate(seq):
        if val is not None:
            continue
        # pos + 1: position 0 is the [CLS] token the tokenizer prepends
        tp, ti = torch.softmax(logits[0, pos + 1], dim=-1).topk(8)
preds = []
for p, tid in zip(tp.tolist(), ti.tolist()):
ts = TOK.convert_ids_to_tokens([tid])[0]
if ts.startswith("T") and ts[1:].isdigit():
sign_id = int(ts[1:])
g = G(sign_id)
info = SIGN_INDEX.get(sign_id, {})
role = info.get("role", "?")
preds.append((sign_id, g, role, p))
if preds:
table = "| Sign | Glyph | Role | Confidence |\n|---|---|---|---|\n"
for sid, g, role, prob in preds[:6]:
table += f"| T{sid} | {g} | {role} | {prob*100:.1f}% |\n"
results.append(f"**Position {pos+1} β€” top predictions:**\n\n{table}")
return "\n\n".join(results) if results else "No predictions found"
# ── Tab 4: Generate ────────────────────────────────────────────
def generate_sequence(temperature, max_len):
seq = GPT_MODEL.generate(temperature=float(temperature), top_k=40,
max_len=int(max_len))
if len(seq) < 2:
return "Generation failed β€” try again"
ens, b, n, e = ensemble(seq)
glyphs = format_glyphs(seq)
seq_str = " ".join(f"T{t}" for t in seq)
roles = []
for t in seq:
info = SIGN_INDEX.get(t, {})
        roles.append(info.get("role", "?")[0])  # first letter of role: P/S/C/R/U (or "?")
    verdict = "✅ VALID" if ens >= 0.85 else "⚠️ UNCERTAIN" if ens >= 0.70 else "❌ INVALID"
result = f"""## Generated Sequence
**Sequence:** {seq_str}
**Glyphs:** {glyphs}
**Roles:** {' → '.join(roles)}
| Model | Score |
|---|---|
| TinyBERT | {b:.4f} |
| N-gram RTL | {n:.4f} |
| ELECTRA | {e:.4f} |
| **Ensemble** | **{ens:.4f}** |
**Verdict:** {verdict}
"""
return result
# ── Tab 5: Sign Explorer ───────────────────────────────────────
def explore_signs(role_filter, min_count):
if not SIGN_INDEX:
return "Sign index not available"
signs = [s for s in SIGN_INDEX.values()
if (role_filter == "ALL" or s.get("role") == role_filter)
and s.get("corpus_count", 0) >= int(min_count)]
signs.sort(key=lambda x: -x.get("corpus_count", 0))
if not signs:
return "No signs found with these filters"
rows = "| Sign | Glyph | Role | Count | Freq% | Start% | End% |\n"
rows += "|---|---|---|---|---|---|---|\n"
for s in signs[:50]:
tid = s.get("mahadevan_id", s.get("sign_id_num","?"))
g = G(tid) if isinstance(tid, int) else ""
role = s.get("role","?")
count = s.get("corpus_count",0)
freq = s.get("corpus_freq_pct",0)
start = s.get("start_rate_pct",0)
end = s.get("end_rate_pct",0)
rows += f"| T{tid} | {g} | {role} | {count:,} | {freq:.2f} | {start:.2f} | {end:.2f} |\n"
header = f"Showing {min(50,len(signs))} of {len(signs)} signs"
return f"{header}\n\n{rows}"
# ── Tab 6: Convert sign ↔ number ──────────────────────────────
def convert_sign(query):
query = query.strip()
lines = query.split()
results = []
for token in lines:
upper = token.upper()
if upper.startswith("T") and upper[1:].isdigit():
tid = int(upper[1:])
g = G(tid)
results.append(f"**{token}** β†’ Sign number **{tid}** | Glyph: {g if g else '(font needed)'}")
elif token.isdigit():
tid = int(token)
g = G(tid)
results.append(f"**{token}** β†’ Sign ID **T{tid}** | Glyph: {g if g else '(font needed)'}")
else:
results.append(f"**{token}** β†’ not recognised (use format: 604 or T604)")
return "\n\n".join(results) if results else "Enter sign IDs separated by spaces"
# ── Build Gradio UI ────────────────────────────────────────────
with gr.Blocks(title="Indus Script Models", theme=gr.themes.Soft()) as demo:
gr.Markdown("""
# 🏺 Indus Script - Interactive Demo
Models trained on 3,310 real archaeological inscriptions from the Indus Valley civilization (2600–1900 BCE).
Install the **Indus Brahmi font** to see actual glyphs. Without the font, glyph fields show as boxes.
""")
with gr.Tabs():
        # Tab 1 - Sign Lookup
        with gr.TabItem("🔍 Sign Lookup"):
gr.Markdown("Look up any sign by its ID. Enter a number like `604` or `T638`.")
with gr.Row():
lookup_input = gr.Textbox(label="Sign ID", placeholder="604 or T604", scale=1)
lookup_button = gr.Button("Look up", variant="primary", scale=0)
lookup_output = gr.Markdown()
lookup_button.click(sign_lookup, inputs=lookup_input, outputs=lookup_output)
lookup_input.submit(sign_lookup, inputs=lookup_input, outputs=lookup_output)
        # Tab 2 - Validate
        with gr.TabItem("✅ Validate Sequence"):
gr.Markdown("Enter an Indus Script sequence to check if it is grammatically valid.")
with gr.Row():
val_input = gr.Textbox(label="Sequence",
placeholder="T638 T177 T420 T122",
value="T638 T177 T420 T122", scale=3)
val_button = gr.Button("Validate", variant="primary", scale=0)
val_output = gr.Markdown()
val_button.click(validate_sequence, inputs=val_input, outputs=val_output)
val_input.submit(validate_sequence, inputs=val_input, outputs=val_output)
gr.Examples(
examples=[["T638 T177 T420 T122"],["T604 T123 T609"],
["T406 T638 T243"],["T101 T741"],
["T122 T638 T177"],["T999 T888"]],
inputs=val_input)
        # Tab 3 - Predict Mask
        with gr.TabItem("🎯 Predict Masked Sign"):
            gr.Markdown("Replace one sign with `[MASK]`; the model predicts what sign belongs there.")
with gr.Row():
mask_input = gr.Textbox(label="Sequence with [MASK]",
placeholder="T638 [MASK] T420 T122",
value="T638 [MASK] T420 T122", scale=3)
mask_button = gr.Button("Predict", variant="primary", scale=0)
mask_output = gr.Markdown()
mask_button.click(predict_mask, inputs=mask_input, outputs=mask_output)
mask_input.submit(predict_mask, inputs=mask_input, outputs=mask_output)
gr.Examples(
examples=[["T638 [MASK] T420 T122"],["T604 [MASK] T609"],
["[MASK] T177 T420"],["T406 T638 [MASK]"]],
inputs=mask_input)
        # Tab 4 - Generate
        with gr.TabItem("⚡ Generate Sequence"):
gr.Markdown("Generate a new Indus Script sequence using NanoGPT.")
with gr.Row():
temp_slider = gr.Slider(0.7, 1.4, value=0.85, step=0.05,
label="Temperature (higher = more random)")
maxlen_slider = gr.Slider(3, 15, value=8, step=1,
label="Max length")
gen_button = gr.Button("Generate", variant="primary")
gen_output = gr.Markdown()
gen_button.click(generate_sequence,
inputs=[temp_slider, maxlen_slider],
outputs=gen_output)
        # Tab 5 - Sign Explorer
        with gr.TabItem("📊 Sign Explorer"):
gr.Markdown("Browse all 641 Indus signs filtered by grammatical role.")
with gr.Row():
role_dd = gr.Dropdown(["ALL","PREFIX","SUFFIX","CORE","RARE","UNSEEN"],
value="ALL", label="Role filter")
count_sl = gr.Slider(0, 200, value=0, step=10,
label="Min corpus count")
exp_button = gr.Button("Show signs", variant="primary")
exp_output = gr.Markdown()
exp_button.click(explore_signs, inputs=[role_dd, count_sl], outputs=exp_output)
        # Tab 6 - Convert
        with gr.TabItem("🔄 Sign ↔ Number"):
gr.Markdown("""
Convert between sign IDs and numbers.
- Enter `604` → get `T604` and its glyph
- Enter `T638` → get `638` and its glyph
- Enter multiple separated by spaces: `604 638 177`
""")
with gr.Row():
conv_input = gr.Textbox(label="Sign ID(s)",
placeholder="604 or T638 or 604 638 177",
scale=3)
conv_button = gr.Button("Convert", variant="primary", scale=0)
conv_output = gr.Markdown()
conv_button.click(convert_sign, inputs=conv_input, outputs=conv_output)
conv_input.submit(convert_sign, inputs=conv_input, outputs=conv_output)
gr.Markdown("""
---
**Models:** [hellosindh/indus-script-models](https://huggingface.co/hellosindh/indus-script-models) |
**Dataset:** [hellosindh/indus-script-synthetic](https://huggingface.co/datasets/hellosindh/indus-script-synthetic)
""")
demo.launch()