# Navy Acronym Finder — Streamlit app.
# (The "Spaces: Sleeping" lines here were Hugging Face page chrome captured by
# the scrape, not part of the source.)
import json
import os
import uuid
from datetime import datetime, timezone

import streamlit as st
from huggingface_hub import HfApi
from rapidfuzz import process, fuzz
# --- CONFIGURATION ---
# Local JSON database of acronym -> definition pairs.
DATA_FILE = "navy_acronyms_clean.json"
# Dataset repo that acts as a "mailbox" for user-submitted suggestions.
SUGGESTIONS_REPO = "NavyDevilDoc/navy-acronym-suggestions"

# --- PAGE SETUP ---
# NOTE(review): page_icon "β" looks like a mis-encoded emoji from the scrape —
# confirm the intended glyph against the deployed Space.
st.set_page_config(page_title="Navy Acronym Finder", page_icon="β")
st.title("EDQP Acronym Lookup")
# --- LOAD DATA ---
def load_acronyms(path=None):
    """Load the acronym dictionary from a JSON file.

    Args:
        path: Optional path to the JSON file. Defaults to ``DATA_FILE``
            when omitted, preserving the original call signature.

    Returns:
        dict mapping acronym -> definition, or an empty dict when the
        file does not exist (the app still renders, just with no data).
    """
    if path is None:
        path = DATA_FILE
    try:
        with open(path, "r", encoding="utf-8") as f:
            return json.load(f)
    except FileNotFoundError:
        # Missing data file is non-fatal by design.
        return {}
# Build the lookup table once per rerun; keys are reused for fuzzy matching.
acronym_dict = load_acronyms()
acronym_keys = list(acronym_dict.keys())

# --- SEARCH BAR ---
query = st.text_input("Enter Acronym:", placeholder="e.g., ACAT, C4ISR...", max_chars=10)
if query:
    # Acronyms are stored uppercase, so normalize the input the same way.
    query = query.upper().strip()

    # Exact hit: show the definition prominently.
    if query in acronym_dict:
        st.success(f"**{query}**")
        st.markdown(f"### {acronym_dict[query]}")

    # Fuzzy suggestions run for every query; the candidate != query guard
    # below keeps an exact hit from being repeated here.
    st.divider()
    st.markdown("#### Did you mean?")
    matches = process.extract(query, acronym_keys, limit=5, scorer=fuzz.ratio)
    for candidate, score, *_ in matches:
        if candidate != query and score > 40:
            with st.expander(f"**{candidate}** ({int(score)}%)"):
                st.write(acronym_dict[candidate])
# --- SUGGESTION BOX (The New Feature) ---
# NOTE(review): the "π"/"β" glyphs in the strings below look like mis-encoded
# emojis from the scrape — confirm against the deployed Space before changing.
st.divider()
with st.expander("π Suggest an Acronym / Report an Error"):
    st.write("Don't see an acronym? Let me know and I'll add it to the database.")
    with st.form("suggestion_form"):
        s_acr = st.text_input("Acronym")
        s_def = st.text_area("Definition / Context")
        submitted = st.form_submit_button("Submit Suggestion")
        if submitted and s_acr and s_def:
            temp_path = None
            try:
                # 1. Prepare the payload with a timezone-aware UTC timestamp.
                #    (datetime.utcnow() is deprecated and returns a naive value.)
                payload = {
                    "acronym": s_acr.upper(),
                    "definition": s_def,
                    "timestamp": datetime.now(timezone.utc).isoformat(),
                }
                # 2. Unique filename so concurrent submissions never collide.
                file_name = (
                    f"suggestion_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
                    f"_{uuid.uuid4().hex[:6]}.json"
                )
                # Stage in /tmp: the container user always has write access there.
                temp_path = os.path.join("/tmp", file_name)
                # 3. Write the payload to the temporary local file.
                with open(temp_path, "w", encoding="utf-8") as f:
                    json.dump(payload, f)
                # 4. Upload to the "mailbox" dataset for later review.
                api = HfApi(token=os.getenv("HF_TOKEN"))
                api.upload_file(
                    path_or_fileobj=temp_path,
                    path_in_repo=file_name,
                    repo_id=SUGGESTIONS_REPO,
                    repo_type="dataset",
                )
                st.success("β Suggestion sent! I'll review it shortly.")
            except Exception as e:
                # Broad catch is deliberate: surface any failure to the user
                # instead of crashing the page.
                st.error(f"Error sending suggestion: {e}")
            finally:
                # Always clean up the staged file, even when the upload fails
                # (the original only removed it on the success path).
                if temp_path and os.path.exists(temp_path):
                    os.remove(temp_path)