# AI Bulk Digest — Streamlit page: batch file digestion, summarization, and docs.
# ─── Standard library & third-party imports ─────────────────────────
# NOTE: os/sys were previously imported twice; consolidated here.
import hashlib
import os
import re
import sys

# ─── Make local utils and the omniscientframework package importable ─
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
UTILS_DIR = os.path.join(BASE_DIR, "utils")
if UTILS_DIR not in sys.path:
    sys.path.insert(0, UTILS_DIR)

ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
PACKAGE_PATH = os.path.abspath(os.path.join(ROOT_PATH, ".."))
if PACKAGE_PATH not in sys.path:
    sys.path.insert(0, PACKAGE_PATH)

import streamlit as st

# ─── Import project utilities ──────────────────────────────────────
from omniscientframework.utils.file_utils import normalize_log_line
from omniscientframework.utils.summarizer import summarize_text
from omniscientframework.utils.docgen import generate_doc
# ─── Page Setup ─────────────────────────────────────────────────────
st.title("📂 AI Bulk Digest")
st.write("Upload multiple files for batch digestion, summarization, and documentation.")

# ─── Initialize Session State ───────────────────────────────────────
# Each key holds a list that must survive Streamlit reruns.
for _state_key in ("uploaded_files", "errors", "bulk_digests"):
    if _state_key not in st.session_state:
        st.session_state[_state_key] = []

# ─── File Upload ────────────────────────────────────────────────────
uploads = st.file_uploader(
    "Upload files (scripts, logs, text, PDFs)",
    type=["py", "sh", "txt", "log", "pdf"],
    accept_multiple_files=True,
)
# ─── Bulk Processing Logic ─────────────────────────────────────────
# For each uploaded file: hash it, build a 30-line preview (log lines
# normalized), summarize, optionally generate docs for scripts, and
# render the results. Errors on one file do not stop the batch.
if uploads:
    digests = []
    for f in uploads:
        try:
            # Hash the RAW bytes so the digest reflects the actual file;
            # hashing the lossy (errors="ignore") decode would silently
            # drop undecodable bytes from the fingerprint.
            raw = f.read()
            sha1 = hashlib.sha1(raw).hexdigest()
            content = raw.decode("utf-8", errors="ignore")

            # File type detection by extension.
            is_log = f.name.endswith(".log")
            is_script = f.name.endswith((".py", ".sh"))

            # Preview: first 30 lines only — slice BEFORE normalizing so
            # log normalization isn't run over the whole file needlessly.
            preview_lines = content.splitlines()[:30]
            if is_log:
                preview_lines = [normalize_log_line(line) for line in preview_lines]
            preview = "\n".join(preview_lines)

            # AI summary of the full content.
            summary = summarize_text(content)

            # Auto-generate documentation for scripts only.
            doc = generate_doc(f.name, "uploaded", content) if is_script else None

            result = {
                "name": f.name,
                "sha1": sha1,
                "preview": preview,
                "summary": summary,
                "doc": doc,
            }
            digests.append(result)

            # Streamlit reruns this script on every interaction, so guard
            # against appending the same file name repeatedly.
            if f.name not in st.session_state.uploaded_files:
                st.session_state.uploaded_files.append(f.name)

            # ─── Display Results ─────────────────────────────────
            st.subheader(f"📄 {f.name}")
            st.code(preview)
            st.markdown(f"**SHA1:** `{sha1}`")
            st.write("🧠 **Summary:**", summary)
            if doc:
                st.write("📘 **Generated Documentation:**")
                st.markdown(doc)
        except Exception as e:
            # Surface the error but keep processing the remaining files.
            st.error(f"⚠️ Error processing {f.name}: {e}")
            st.session_state.errors.append(str(e))

    # Persist results for other pages / later reruns.
    st.session_state.bulk_digests = digests
    st.success(f"✅ Bulk digestion complete for {len(digests)} files.")