|
|
import gradio as gr |
|
|
import subprocess |
|
|
import re |
|
|
import os |
|
|
import uuid |
|
|
import json |
|
|
import random |
|
|
import html |
|
|
import threading |
|
|
from queue import Queue, Empty |
|
|
from typing import Optional, List, Any, Dict, Tuple |
|
|
from run_question import answer_question_recall |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Port of the serper host sidecar service. Hard indexing (not .get) means a
# missing PORT_SERPER_HOST raises KeyError at import time — presumably a
# deliberate fail-fast; NOTE(review): confirm before softening.
port = os.environ["PORT_SERPER_HOST"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Best-effort launch of the serper host sidecar. Failure is logged but never
# fatal: the UI can still start, and searches simply fail until the host is up.
try:
    print("Starting serper host server")
    # Fire-and-forget: the Popen handle is intentionally discarded.
    _ = subprocess.Popen(["nohup", "sh", "./web_agents_5/host_serper2.sh"])
except Exception as e:
    # Removed a redundant `pass` that followed this print (dead statement).
    print(f"Failed to start serper host server: {e}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# When True, tool-call sections would be ordered ahead of the assistant text.
ORDER_TOOL_CALLS_FIRST = False

# Fixed vertical gap inserted between rendered HTML sections.
SPACER = '<div style="height:16px"></div>'

# Minimum characters for a tool callout. NOTE(review): not referenced in this
# chunk — confirm it is used elsewhere before removing.
MIN_TOOLCALLOUT_CHARS = 160

# Maximum characters shown in collapsed previews / tool-call summary lines.
PREVIEW_LIMIT = 135

# Soft-wrap width for \boxed{...} content; overridable via the environment.
BOXED_WRAP_WIDTH = int(os.getenv("BOXED_WRAP_WIDTH", "130"))

# Invisible Unicode characters stripped from model output:
# BOM (U+FEFF), zero-width space (U+200B), zero-width joiner (U+200D).
INVISIBLES = ('\ufeff', '\u200b', '\u200d')
|
|
|
|
|
def _strip_leading_garbage(md: str) -> str:
    """Coerce to str, drop invisible Unicode characters, normalize newlines,
    and delete whitespace-only lines throughout the string."""
    text = md if isinstance(md, str) else str(md)
    for invisible in INVISIBLES:
        text = text.replace(invisible, '')
    text = text.replace('\r\n', '\n').replace('\r', '\n')
    # count=0 means "replace every occurrence": all whitespace-only lines are
    # removed (multiline mode), not only the leading ones.
    return re.sub(r'^\s*\n', '', text, count=0, flags=re.M)
|
|
|
|
|
def _ensure_md_blocks(md: str) -> str: |
|
|
|
|
|
md = re.sub(r'(^|\n)(#{1,6}[^\n]+)\n(?!\n)', r'\1\2\n\n', md) |
|
|
return md |
|
|
|
|
|
def format_math(text: str) -> str:
    """No-op math formatter, kept for call-site compatibility.

    Earlier versions rewrote [ ... ] into $$ ... $$ and \\( ... \\) into
    $ ... $. That clashed with already-sanitized LaTeX produced by
    ensure_display_math (nested $$ inside $$ blocks broke KaTeX), so the
    function now returns its input unchanged, coercing non-strings to str.
    """
    return text if isinstance(text, str) else str(text)
|
|
|
|
|
def generate_conversation_id():
    """Return a short conversation id: the first 8 hex chars of a random UUID4."""
    fresh = uuid.uuid4()
    return str(fresh)[:8]
|
|
|
|
|
def _maybe_json_load(s: Any) -> Any: |
|
|
if isinstance(s, (dict, list, int, float, bool)) or s is None: |
|
|
return s |
|
|
if not isinstance(s, str): |
|
|
return s |
|
|
for _ in range(2): |
|
|
try: |
|
|
s = json.loads(s) |
|
|
except Exception: |
|
|
break |
|
|
return s |
|
|
|
|
|
def _pretty_json(anything: Any) -> str:
    r"""Render any value as readable text.

    Structured values (after a best-effort JSON decode) are pretty-printed
    with 2-space indentation; strings have literal '\n'/'\t' escape sequences
    expanded; anything else falls back to str().
    """
    obj = _maybe_json_load(anything)
    if obj is None or isinstance(obj, (dict, list, int, float, bool)):
        try:
            return json.dumps(obj, ensure_ascii=False, indent=2)
        except Exception:
            return str(obj)
    if isinstance(obj, str):
        if "\\n" in obj or "\\t" in obj:
            return obj.replace("\\n", "\n").replace("\\t", "\t")
        return obj
    return str(obj)
|
|
|
|
|
def _code_block(lang: str, text: Any) -> str: |
|
|
s = str(text) if not isinstance(text, str) else text |
|
|
s = s.replace("```", "``\u200b`") |
|
|
return f"```{lang}\n{s}\n```" |
|
|
|
|
|
def _normalize_tool_call(tc: Any) -> Dict[str, Any]:
    """Coerce a tool-call payload (dict or JSON string) into the canonical
    {"name": ..., "arguments": ...} shape; unparseable input becomes a
    'tool' call carrying the raw text."""
    candidate = tc if isinstance(tc, dict) else _maybe_json_load(tc)
    if isinstance(candidate, dict):
        return {
            "name": candidate.get("name") or "tool",
            # Some producers use "args" instead of "arguments".
            "arguments": candidate.get("arguments") or candidate.get("args") or {},
        }
    return {"name": "tool", "arguments": {"raw": str(tc)}}
|
|
|
|
|
def _letter(i: int) -> str: |
|
|
return f"{chr(96 + i)})" |
|
|
|
|
|
def _summarize_call(tc: Any) -> Tuple[str, str]:
    """Produce a (label, preview) pair for the collapsed tool-call summary.

    Known tools get friendly labels and their key argument; anything else is
    labeled with its raw name and a truncated JSON dump of the arguments.
    """
    norm = _normalize_tool_call(tc)
    raw_name = norm.get("name") or "tool"
    args = norm.get("arguments") or {}
    lowered = raw_name.lower()
    if lowered == "search_urls":
        query = str(args.get("query", "")).strip()
        return "Search", query or "(no query)"
    if lowered == "query_url":
        url = str(args.get("url", "")).strip()
        return "Query URL", url or "(no url)"
    if isinstance(args, (dict, list)):
        preview = json.dumps(args, ensure_ascii=False)
    else:
        preview = str(args)
    if len(preview) > PREVIEW_LIMIT:
        preview = preview[:PREVIEW_LIMIT] + "…"
    return raw_name, preview
|
|
|
|
|
def _render_tool_calls_details(tool_calls: List[Any], tool_results: Optional[List[Any]] = None) -> str:
    """Render one HTML <details> element per tool call, pairing each call with
    its result when available (otherwise '(pending)'), separated by spacers."""
    if not tool_calls:
        return ""
    results = tool_results or []
    chunks = ["<strong>Tool calls</strong>:", SPACER]
    for idx, call in enumerate(tool_calls, 1):
        label, value = _summarize_call(call)
        header = f"{_letter(idx)} {label}: {value}"
        chunks.append(f"<details><summary><strong>{header}</strong></summary>\n")
        chunks.append("<div><strong>Tool Call</strong></div>\n\n")
        chunks.append(_code_block("json", _pretty_json(call)))
        chunks.append("\n")
        chunks.append("<div><strong>Tool Result</strong></div>\n\n")
        if idx - 1 < len(results):
            chunks.append(_code_block("json", _pretty_json(results[idx - 1])))
            chunks.append("\n")
        else:
            chunks.append("_(pending)_\n")
        chunks.append("</details>\n")
        # Two spacers after each entry for visual separation.
        chunks.append(SPACER)
        chunks.append(SPACER)
    return "".join(chunks).strip()
|
|
|
|
|
def _strip_tool_json_blobs(s: str) -> str:
    """Remove inline tool-call JSON objects from text.

    Scans character by character; at each '{' it brace-matches the candidate
    object (honoring backslash escapes). Objects containing both '"name"' and
    '"arguments"' (case-insensitive) are dropped; any other balanced object is
    kept verbatim. An unbalanced '{' is emitted as a literal character.
    """
    out, i, n = [], 0, len(s)
    while i < n:
        ch = s[i]
        if ch == '{':
            depth, k = 1, i + 1
            while k < n:
                c = s[k]
                if c == '\\' and k + 1 < n:
                    # Skip escaped character pairs so '\}' doesn't close a brace.
                    k += 2; continue
                if c == '{':
                    depth += 1
                elif c == '}':
                    depth -= 1
                    if depth == 0:
                        blob = s[i:k+1]; lc = blob.lower()
                        if '"name"' in lc and '"arguments"' in lc:
                            # Tool-call blob: drop it entirely.
                            i = k + 1
                            break
                        else:
                            # Ordinary balanced object: keep verbatim.
                            out.append(blob); i = k + 1; break
                k += 1
            else:
                # while-else: brace never closed — emit '{' and move on.
                out.append(ch); i += 1
        else:
            out.append(ch); i += 1
    return "".join(out)
|
|
|
|
|
def _plain_preview(md: str, limit: int = PREVIEW_LIMIT) -> str:
    """Flatten markdown/HTML into a single-line plain-text preview of at most
    `limit` characters (plus an ellipsis when truncated)."""
    cleaned = _strip_tool_json_blobs(md or "")
    cleaned = re.sub(r"<[^>]+>", "", cleaned)                       # HTML tags
    cleaned = re.sub(r"```.*?```", "", cleaned, flags=re.S)         # fenced code
    cleaned = re.sub(r"`{1,3}.*?`{1,3}", "", cleaned, flags=re.S)   # inline code
    cleaned = re.sub(r"[*_#>\-]+", "", cleaned)                     # md punctuation
    cleaned = cleaned.replace("\n", " ").strip()
    if len(cleaned) > limit:
        cleaned = cleaned[:limit].rstrip() + "…"
    return cleaned or "(no preview)"
|
|
|
|
|
def _assistant_section(text_md: str) -> str:
    """Render one assistant message as a labeled, black-text HTML section
    followed by a spacer."""
    sanitized = ensure_display_math(text_md)
    body = format_math(sanitized).strip()
    return (
        "<strong>Assistant Response</strong>:\n\n"
        "<div class=\"fathom-content\" style=\"color:#000 !important;\">"
        + body
        + "</div>\n\n"
        + SPACER
    )
|
|
|
|
|
def _wrap_turn(inner_html: str) -> str: |
|
|
return ( |
|
|
f"<div class=\"fathom-bubble\" style=\"background:#f7f9fc;border:1px solid #e5e9f2;" |
|
|
f"border-radius:12px;padding:14px 16px;margin:6px 0;color:#000;\">" |
|
|
f"{inner_html}</div>" |
|
|
) |
|
|
|
|
|
def _wrap_condensed_step(inner_html: str) -> str: |
|
|
"""Yellow sub-bubble wrapper for each condensed step inside the blue cluster.""" |
|
|
return ( |
|
|
f"<div class=\"fathom-bubble\" style=\"background:#fff9db;border:1px solid #ffe08a;" |
|
|
f"border-radius:12px;padding:12px 14px;margin:8px 0;color:#000;\">" |
|
|
f"{inner_html}</div>" |
|
|
) |
|
|
|
|
|
def _condensed_step_block(assistant_text: str,
                          tool_calls: Optional[List[Any]] = None,
                          tool_results: Optional[List[Any]] = None,
                          last_open: bool = False) -> str:
    """One condensed step: a collapsible preview whose body holds the full
    assistant text plus any tool-call details, wrapped in the yellow bubble.

    last_open: when True, render the step expanded by default.
    """
    preview = _plain_preview(assistant_text, PREVIEW_LIMIT)
    sections = [_assistant_section(assistant_text)]
    if tool_calls:
        sections.append(_render_tool_calls_details(tool_calls, tool_results))
    body = "".join(sections).strip()
    opened = " open" if last_open else ""
    details = (
        f"<details{opened}><summary><strong>Thinking (preview)</strong>: {preview}</summary>\n\n"
        f"{body}\n\n</details>"
    )
    return _wrap_condensed_step(details)
|
|
|
|
|
def _cluster_wrapper(inner_html: str) -> str: |
|
|
"""Blue cluster bubble that holds all condensed steps. Expanded by default.""" |
|
|
return ( |
|
|
f"<div class=\"fathom-bubble\" style=\"background:#eef7ff;border:1px solid #cfe6ff;" |
|
|
f"border-radius:12px;padding:14px 16px;margin:10px 0;color:#000;\">" |
|
|
f"<details open><summary><strong>Initial thinking, steps, tool calls</strong></summary>\n\n" |
|
|
f"{inner_html}\n\n" |
|
|
f"</details></div>" |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Characters that must be backslash-escaped inside math mode, mapped to their
# escaped forms. Consumed by _escape_other_specials_in_math.
LATEX_ESC_MAP_IN_MATH = {
    '%': r'\%',
    '#': r'\#',
}
|
|
|
|
|
def _escape_unescaped_dollars(s: str) -> str: |
|
|
|
|
|
return re.sub(r'(?<!\\)\$', r'\\$', s) |
|
|
|
|
|
def _markdown_bold_to_tex(s: str) -> str: |
|
|
return re.sub(r'\*\*(.+?)\*\*', r'\\textbf{\1}', s) |
|
|
|
|
|
def _escape_other_specials_in_math(s: str) -> str:
    """Escape bare '%' and '#' (not already backslashed) via the in-math map."""
    def _escape(match):
        return LATEX_ESC_MAP_IN_MATH[match.group(0)]
    return re.sub(r'(?<!\\)[%#]', _escape, s)
|
|
|
|
|
def _strip_naked_dollars_in_math(s: str) -> str: |
|
|
""" |
|
|
Remove any standalone $$ tokens that appear inside math content (not proper $$…$$ pairs). |
|
|
We do this BEFORE escaping $, so they don't turn into literal '\$\$'. |
|
|
""" |
|
|
|
|
|
s = re.sub(r'(^|\s)\$\$(\s|$)', lambda m: (m.group(1) or '') + (m.group(2) or ''), s) |
|
|
|
|
|
|
|
|
s = re.sub(r'\$\$(?=\s*(?:\\end|\)|}|$))', '', s) |
|
|
s = re.sub(r'(?:(?<=\\begin\{aligned\})|\n)\s*\$\$\s*', '', s) |
|
|
return s |
|
|
|
|
|
def _mask_text_blocks(s: str):
    r"""Temporarily mask balanced \text{...} spans with placeholder tokens so
    later passes don't mutate their contents (e.g. inserting \allowbreak).

    Returns (masked_string, tokens) where tokens is a list of
    (placeholder, original_block) pairs for _unmask_text_blocks.
    """
    out, tokens = [], []
    i, n = 0, len(s)
    tcount = 0
    while i < n:
        j = s.find(r'\text{', i)
        if j == -1:
            out.append(s[i:]); break
        out.append(s[i:j])
        # Brace-match the argument, skipping backslash-escaped pairs.
        k = j + len(r'\text{'); depth = 1
        while k < n and depth > 0:
            c = s[k]
            if c == '\\' and k + 1 < n:
                k += 2; continue
            if c == '{': depth += 1
            elif c == '}': depth -= 1
            k += 1
        # Unbalanced span: mask everything to the end of the string.
        block = s[j:k] if depth == 0 else s[j:]
        token = f'__TEXTBLOCK_{tcount}__'
        tokens.append((token, block))
        out.append(token)
        i = k if depth == 0 else n
        tcount += 1
    return ''.join(out), tokens
|
|
|
|
|
def _unmask_text_blocks(s: str, tokens) -> str: |
|
|
for token, block in tokens: |
|
|
s = s.replace(token, block) |
|
|
return s |
|
|
|
|
|
def _mask_command_blocks(s: str, commands: List[str]):
    r"""Mask balanced \cmd{...} spans for specific SINGLE-argument commands so
    surrounding plain text can be wrapped in \text{...} without splitting them.

    Only single-argument commands (e.g. \textbf, \emph) are safe here; a
    multi-argument command like \href{..}{..} would leave brace imbalance.

    Returns (masked_string, tokens) with (placeholder, original_block) pairs.
    On any unexpected error, returns the input unmodified with no tokens.
    """
    try:
        tokenized = s or ""
        tokens: List[Tuple[str, str]] = []
        tcount = 0
        for cmd in (commands or []):
            pattern = r'\\' + cmd + r'{'
            i = 0
            n = len(tokenized)
            while i < n:
                j = tokenized.find(pattern, i)
                if j == -1:
                    break
                # Brace-match the argument, skipping escaped character pairs.
                k = j + len(pattern)
                depth = 1
                while k < n and depth > 0:
                    c = tokenized[k]
                    if c == '\\' and k + 1 < n:
                        k += 2
                        continue
                    if c == '{':
                        depth += 1
                    elif c == '}':
                        depth -= 1
                    k += 1
                if depth == 0:
                    # Balanced: replace the whole \cmd{...} span with a token.
                    block = tokenized[j:k]
                    token = f'__CMDBLOCK_{tcount}__'
                    tokens.append((token, block))
                    tokenized = tokenized[:j] + token + tokenized[k:]
                    n = len(tokenized)
                    i = j + len(token)
                    tcount += 1
                else:
                    # Unbalanced: skip past the opener and keep scanning.
                    i = j + len(pattern)
        return tokenized, tokens
    except Exception:
        return s, []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _add_break_opportunities_outside_text(s: str) -> str:
    r"""Insert \allowbreak{} after break-friendly characters (/, ≈, ',', ':'),
    but only outside \text{...} spans (which are masked during the rewrite)."""
    masked, tokens = _mask_text_blocks(s)
    masked = re.sub(r'(?<!\\)/', r'/\\allowbreak{}', masked)
    masked = masked.replace('≈', r'\allowbreak{}≈\allowbreak{}')
    # Avoid doubling a break after commas that already carry one.
    masked = re.sub(r',(?!\s*\\allowbreak\{\})', r',\\allowbreak{}', masked)
    masked = masked.replace(':', r'\allowbreak{}:\allowbreak{}')
    return _unmask_text_blocks(masked, tokens)
|
|
|
|
|
def _fix_inline_links_in_math(s: str) -> str: |
|
|
r""" |
|
|
Inside math, handle model patterns like: $$Label$$(https://example.com) |
|
|
Be robust to \allowbreak{} sprinkled by earlier passes. |
|
|
1) $$Label$$(url) -> \href{url}{Label} |
|
|
2) any remaining $$...$$ -> \text{...} |
|
|
""" |
|
|
|
|
|
AB = r'(?:\\allowbreak\{\})*' |
|
|
|
|
|
url_pat = rf'(https?{AB}:{AB}//{AB}[^)]+?)' |
|
|
|
|
|
|
|
|
s = re.sub( |
|
|
rf'\$\$(.+?)\$\$\({url_pat}\)', |
|
|
r'\\href{\2}{\1}', |
|
|
s, |
|
|
flags=re.S |
|
|
) |
|
|
|
|
|
|
|
|
s = re.sub(r'\$\$(.+?)\$\$', r'\\text{\1}', s, flags=re.S) |
|
|
return s |
|
|
|
|
|
|
|
|
def _strip_all_nested_dollars(s: str) -> str: |
|
|
""" |
|
|
After all transforms, if any '$$' survive inside math content, |
|
|
convert the smallest $$...$$ spans to \text{...}. Repeat until gone. |
|
|
""" |
|
|
|
|
|
while '$$' in s: |
|
|
new_s = re.sub(r'\$\$(.+?)\$\$', r'\\text{\1}', s, flags=re.S) |
|
|
if new_s == s: |
|
|
|
|
|
new_s = s.replace('$$', '') |
|
|
s = new_s |
|
|
return s |
|
|
|
|
|
def _cleanup_tex_command_braces_and_percent(s: str) -> str: |
|
|
""" |
|
|
Final cleanup for LaTeX text: |
|
|
- Fix escaped brace after single-argument commands: \textbf\{ -> \textbf{, \emph\{ -> \emph{ |
|
|
- Collapse double-backslash percent: \\% -> \% (avoids '%' commenting the rest of the line after a linebreak) |
|
|
Patterns are conservative and won't affect '\\\\' row breaks since they aren't followed by '%'. |
|
|
""" |
|
|
try: |
|
|
|
|
|
s = re.sub(r'\\(text|textbf|emph)\\\{', r'\\\1{', s) |
|
|
|
|
|
s = re.sub(r'\\{2}%', r'\\%', s) |
|
|
return s |
|
|
except Exception: |
|
|
return s |
|
|
|
|
|
def _normalize_escaped_closing_braces_for_single_arg_commands(s: str, commands: List[str]) -> str: |
|
|
""" |
|
|
Regex-normalize cases like '\textbf{Batters\}:' -> '\textbf{Batters}:' |
|
|
Works for simple single-arg commands where the content has no nested braces. |
|
|
This is a fast-path fix; more complex cases are handled by the scanner-based repair. |
|
|
""" |
|
|
try: |
|
|
out = s or "" |
|
|
for cmd in (commands or []): |
|
|
|
|
|
out = re.sub(rf'\\{cmd}\s*\{{([^{{}}]*?)\\\}}', rf'\\{cmd}{{\1}}', out) |
|
|
return out |
|
|
except Exception: |
|
|
return s |
|
|
|
|
|
def _repair_unclosed_single_arg_commands(s: str, commands: List[str]) -> str:
    r"""Heuristically repair \textbf{...}, \emph{...}, \text{...} arguments
    closed with a literal '\}' instead of '}' (which leaves the group open and
    triggers 'File ended while scanning ...').

    Optional whitespace between the command and '{' is tolerated. Only the
    listed single-argument commands are touched, and only when their top-level
    argument fails to close. On unexpected error, the input is returned as-is.
    """
    try:
        out = s or ""
        for cmd in (commands or []):
            pat = re.compile(r'\\' + cmd + r'\s*{')
            i = 0
            while True:
                m = pat.search(out, i)
                if not m:
                    break
                j = m.start()
                k = m.end()
                n = len(out)
                depth = 1
                # Position of the last top-level '\}' seen — the repair target.
                last_esc_close_top = -1
                while k < n:
                    c = out[k]
                    if c == '\\' and k + 1 < n:
                        # Remember a '\}' at argument depth 1; it is the most
                        # likely intended closer if the group never balances.
                        if out[k + 1] == '}' and depth == 1:
                            last_esc_close_top = k
                        k += 2
                        continue
                    if c == '{':
                        depth += 1
                    elif c == '}':
                        depth -= 1
                        if depth == 0:
                            break
                    k += 1
                if depth == 0:
                    # Argument closed properly; continue scanning after it.
                    i = k + 1
                    continue
                if last_esc_close_top != -1:
                    # Drop the backslash so '\}' becomes a real closer, then
                    # rescan the same command (string shifted by one char).
                    out = out[:last_esc_close_top] + out[last_esc_close_top + 1:]
                    i = j
                    continue
                # Unbalanced with no candidate closer: skip past the opener.
                i = m.end()
        return out
    except Exception:
        return s
|
|
|
|
|
def _repair_unclosed_single_arg_commands_in_text_groups(s: str, commands: List[str]) -> str:
    r"""Run _repair_unclosed_single_arg_commands on the INNER content of each
    \text{...} group.

    Confining the repair scan to a text group's interior prevents the outer
    group's braces from being misread as the broken command's braces.
    """
    try:
        out: List[str] = []
        i, n = 0, len(s or "")
        text_pat = re.compile(r'\\text\s*{')
        while i < n:
            m = text_pat.search(s, i)
            if not m:
                out.append(s[i:])
                break
            # Emit text before the group plus the '\text{' opener itself.
            out.append(s[i:m.end()])
            # Brace-match the group's argument, skipping escaped pairs.
            k = m.end()
            depth = 1
            while k < n and depth > 0:
                c = s[k]
                if c == '\\' and k + 1 < n:
                    k += 2
                    continue
                if c == '{':
                    depth += 1
                elif c == '}':
                    depth -= 1
                k += 1
            if depth != 0:
                # Unbalanced text group: emit the rest untouched and stop.
                out.append(s[m.end():])
                break
            inner = s[m.end():k-1]
            fixed_inner = _repair_unclosed_single_arg_commands(inner, commands)
            out.append(fixed_inner)
            out.append('}')
            i = k
        return ''.join(out) or s
    except Exception:
        return s
|
|
|
|
|
def _unwrap_envs_from_text(s: str) -> str: |
|
|
""" |
|
|
If the model emits \text{ \begin{env} ... \end{env} }, unwrap the environment |
|
|
from the \text{...} wrapper so it's in math mode again. |
|
|
Run repeatedly until nothing to unwrap. |
|
|
""" |
|
|
|
|
|
env_pat = re.compile(r'\\text\s*\{\s*(\\begin\{[a-zA-Z*]+\}.*?\\end\{[a-zA-Z*]+\})\s*\}', re.S) |
|
|
|
|
|
boxed_pat = re.compile(r'\\text\s*\{\s*(\\boxed\{.*?\})\s*\}', re.S) |
|
|
|
|
|
while True: |
|
|
new_s = env_pat.sub(r'\1', s) |
|
|
new_s = boxed_pat.sub(r'\1', new_s) |
|
|
if new_s == s: |
|
|
break |
|
|
s = new_s |
|
|
return s |
|
|
|
|
|
def _sanitize_inside_math(s: str) -> str:
    r"""Sanitization pipeline for content that is already inside a math region.

    Order matters:
      1. unwrap environments wrongly nested in \text{...}
      2. markdown **bold** -> \textbf{...}
      3. repair $$Label$$(url) link artifacts
      4. escape remaining bare '$' (after step 3, so links aren't mangled)
      5. escape bare '%' and '#'
      6. convert/remove any surviving nested '$$'
    """
    s = _unwrap_envs_from_text(s)
    s = _markdown_bold_to_tex(s)
    s = _fix_inline_links_in_math(s)
    s = _escape_unescaped_dollars(s)
    s = _escape_other_specials_in_math(s)
    s = _strip_all_nested_dollars(s)
    return s
|
|
|
|
|
def _force_multiline_box_content(s: str) -> str:
    r"""Render plain-text \boxed{...} content as multiple left-aligned lines.

    Heuristics, applied in order:
    - Content already containing \begin/\end/\aligned/'\\' is left as-is.
    - Otherwise, split at ':' (not '://'), ' - ' bullets, explicit newlines,
      JSON-array objects, ' > ' ranking separators, 'Assumptions' sections,
      and numbered-list markers ('1. ', '2. ').
    - Long single lines are soft-wrapped at BOXED_WRAP_WIDTH on spaces.

    Formatting:
    - Rows go into \begin{array}{l} ... \end{array} for left alignment.
    - Rows without TeX commands are wrapped in \text{...}.
    - { } _ & % # are escaped inside \text{...}.

    On any unexpected error the input is returned untouched.
    """
    def _fmt_lines_array(lines: List[str]) -> str:
        # Build one array row per non-empty line, escaping plain text.
        out = []
        for l in lines:
            l = l.strip()
            if not l:
                continue
            if re.search(r'\\[a-zA-Z]+', l):
                # Mixed TeX/plain row: mask \text and single-arg command blocks,
                # wrap only the plain fragments in \text{...}, then unmask.
                try:
                    masked_text, toks_text = _mask_text_blocks(l)
                    masked_cmd, toks_cmd = _mask_command_blocks(masked_text, ["textbf", "emph"])
                    parts = re.split(r'(__TEXTBLOCK_\d+__|__CMDBLOCK_\d+__)', masked_cmd)
                    rebuilt = []
                    for part in parts:
                        if not part:
                            continue
                        if part.startswith('__TEXTBLOCK_') or part.startswith('__CMDBLOCK_'):
                            rebuilt.append(part)
                        else:
                            esc = (
                                part.replace('{', r'\{')
                                .replace('}', r'\}')
                                .replace('_', r'\_')
                                .replace('&', r'\&')
                                .replace('%', r'\%')
                                .replace('#', r'\#')
                            )
                            rebuilt.append(r'\text{' + esc + '}')
                    mixed = "".join(rebuilt)
                    # Unmask in reverse order of masking.
                    mixed = _unmask_text_blocks(mixed, toks_cmd)
                    mixed = _unmask_text_blocks(mixed, toks_text)
                    out.append(mixed)
                except Exception:
                    out.append(l)
            else:
                # Pure plain-text row: escape and wrap whole.
                esc = (
                    l.replace('{', r'\{')
                    .replace('}', r'\}')
                    .replace('_', r'\_')
                    .replace('&', r'\&')
                    .replace('%', r'\%')
                    .replace('#', r'\#')
                )
                out.append(r'\text{' + esc + '}')
        return r'\begin{array}{l}' + r' \\\\ '.join(out) + r'\end{array}'

    try:
        txt = s or ""
        # Already multiline TeX: don't touch.
        if re.search(r'(\\begin\{|\\end\{|\\aligned|\\\\)', txt):
            return txt
        # Mask \text{...} so splitting never happens inside those spans.
        masked_txt, toks = _mask_text_blocks(txt)
        # Break after ':' (but not inside URLs like 'https://').
        masked_txt = re.sub(r':(?!//)', ':\n', masked_txt)
        # Break before ' - ' bullets.
        masked_txt = re.sub(r'\s-\s', '\n- ', masked_txt)
        parts = [ln.strip() for ln in re.split(r'\s*\n+\s*', masked_txt) if ln.strip()]
        if not parts:
            parts = [masked_txt.strip()]
        W = BOXED_WRAP_WIDTH

        def _wrap_masked_by_space_next(s: str, width: int) -> List[str]:
            # Soft-wrap at the first space AT OR AFTER `width` (never mid-word);
            # fall back to the last space, else keep the line whole.
            out: List[str] = []
            s = s.strip()
            while len(s) > width:
                m = re.search(r'\s', s[width:])
                if m:
                    cut = width + m.start()
                    out.append(s[:cut].rstrip())
                    s = s[cut+1:].lstrip()
                    continue
                prev = s.rfind(' ')
                if prev != -1:
                    out.append(s[:prev].rstrip())
                    s = s[prev+1:].lstrip()
                else:
                    out.append(s)
                    s = ""
            if s:
                out.append(s)
            return out

        wrapped_masked: List[str] = []
        for p in parts:
            wrapped_masked.extend(_wrap_masked_by_space_next(p, W))
        if len(wrapped_masked) > 1:
            unmasked_lines = [_unmask_text_blocks(line, toks) for line in wrapped_masked]
            return _fmt_lines_array(unmasked_lines)
        # Single row so far: unmask and try the remaining heuristics.
        txt = _unmask_text_blocks(wrapped_masked[0], toks)
        # Explicit '\\' breaks without an environment.
        if re.search(r'\\\\', txt):
            lines = [ln.strip() for ln in re.split(r'\s*\\\\\s*', txt) if ln.strip()]
            if lines:
                return _fmt_lines_array(lines)
        # JSON-ish array [ {...}, {...} ]: one object per row.
        m = re.match(r'^\s*\[(.*)\]\s*$', txt, flags=re.S)
        if m:
            inner = m.group(1)
            parts = re.split(r'\},\s*', inner)
            lines = []
            for p in parts:
                p = p.strip()
                if not p:
                    continue
                if not p.endswith('}'):
                    p = p + '}'  # restore the brace consumed by the split
                p = p.lstrip(',')
                lines.append(p)
            if lines:
                return _fmt_lines_array(lines)
        # Comparative/ranking summaries: split on ' > '.
        gt_parts = [p.strip() for p in re.split(r'\s*>\s*', txt) if p and p.strip()]
        if len(gt_parts) > 1:
            lines = []
            for idx, part in enumerate(gt_parts):
                lines.append(part if idx == 0 else ('> ' + part))
            return _fmt_lines_array(lines)
        # 'Assumptions' sections: split head at numbered markers, tail at bullets.
        m_assume = re.search(r'(?im)^\s*(?:\*{0,2}\s*)?(?:\\textbf\{)?Assumptions(?:\})?\s*:?', txt)
        if m_assume:
            head = txt[:m_assume.start()].strip()
            tail = txt[m_assume.start():].strip()
            head_tokens = re.split(r'(?=(?:^|\n|\s)\d+\.\s)', head)
            head_lines = [t.strip() for t in head_tokens if t and t.strip()]
            if len(head_lines) <= 1:
                head_lines = [ln.strip() for ln in re.split(r'\s*\n+\s*', head) if ln.strip()]
            tail_lines = [ln.strip() for ln in re.split(r'\s*\n+\s*', tail) if ln.strip()]
            if len(tail_lines) <= 1:
                tail_lines = [p.strip() for p in re.split(r'\s+-\s+', tail) if p.strip()]
            lines = head_lines + tail_lines
            if lines:
                return _fmt_lines_array(lines)
        # Numbered-list markers ('1. ', '2. ').
        tokens = re.split(r'(?=(?:^|\n|\s)\d+\.\s)', txt)
        lines = [t.strip() for t in tokens if t and t.strip()]
        # Explicit newlines.
        if len(lines) <= 1:
            nl_lines = [ln.strip() for ln in re.split(r'\s*\n+\s*', txt) if ln.strip()]
            if len(nl_lines) > 1:
                lines = nl_lines
        # Fallback for very long text: ' - ' bullets.
        if len(lines) <= 1 and len(txt.strip()) > 120:
            bullet_lines = [b.strip() for b in re.split(r'\s+-\s+', txt) if b.strip()]
            if len(bullet_lines) > 1:
                lines = bullet_lines
        if len(lines) > 1:
            return _fmt_lines_array(lines)
        # Still one long line: soft-wrap at the LAST space before the width.
        W = BOXED_WRAP_WIDTH
        txt_stripped = txt.strip()
        if len(txt_stripped) > W:
            def _soft_wrap_by_space(t: str, width: int = 80) -> List[str]:
                rows: List[str] = []
                s2 = t.strip()
                while len(s2) > width:
                    cut = s2.rfind(' ', 0, width + 1)
                    if cut == -1:
                        cut = width  # no space: hard cut mid-word
                    rows.append(s2[:cut].strip())
                    s2 = s2[cut:].lstrip()
                if s2:
                    rows.append(s2)
                return rows
            soft_lines = _soft_wrap_by_space(txt_stripped, W)
            if len(soft_lines) > 1:
                return _fmt_lines_array(soft_lines)

        def _escape_text_mode_local(s2: str) -> str:
            # Escape characters that are special inside \text{...}.
            return (
                s2.replace('{', r'\{')
                .replace('}', r'\}')
                .replace('_', r'\_')
                .replace('&', r'\&')
                .replace('%', r'\%')
                .replace('#', r'\#')
            )

        def _wrap_plain_outside_commands_local(s2: str) -> str:
            # Walk the string; pass TeX commands (with their balanced brace
            # groups) through untouched and wrap plain runs in \text{...}.
            n2 = len(s2)
            i2 = 0
            parts2: List[str] = []
            plain2: List[str] = []

            def flush_plain2():
                if not plain2:
                    return
                chunk2 = ''.join(plain2)
                if chunk2:
                    parts2.append(r'\text{' + _escape_text_mode_local(chunk2) + '}')
                plain2.clear()

            while i2 < n2:
                ch2 = s2[i2]
                if ch2 == '\\':
                    flush_plain2()
                    # Consume the command name (letters after the backslash).
                    j2 = i2 + 1
                    while j2 < n2 and s2[j2].isalpha():
                        j2 += 1
                    if j2 == i2 + 1:
                        # Escaped single char (e.g. '\{') or trailing backslash.
                        if i2 + 1 < n2:
                            parts2.append(s2[i2:i2+2])
                            i2 += 2
                        else:
                            parts2.append('\\')
                            i2 += 1
                        continue
                    # Consume any consecutive balanced {...} argument groups.
                    k2 = j2
                    while k2 < n2 and s2[k2] == '{':
                        depth2 = 1
                        t = k2 + 1
                        while t < n2 and depth2 > 0:
                            c3 = s2[t]
                            if c3 == '\\' and t + 1 < n2:
                                t += 2
                                continue
                            if c3 == '{':
                                depth2 += 1
                            elif c3 == '}':
                                depth2 -= 1
                            t += 1
                        k2 = t
                        if depth2 != 0:
                            break
                    parts2.append(s2[i2:(k2 if k2 > j2 else j2)])
                    i2 = (k2 if k2 > j2 else j2)
                else:
                    plain2.append(ch2)
                    i2 += 1
            flush_plain2()
            return ''.join(parts2)

        if not re.search(r'\\[a-zA-Z]+', txt):
            # Entirely plain text: one \text{...} wrapper suffices.
            esc = _escape_text_mode_local(txt_stripped)
            return r'\text{' + esc + '}'
        else:
            return _wrap_plain_outside_commands_local(txt)
    except Exception:
        return s
|
|
|
|
|
def ensure_display_math(s: str) -> str:
    r"""Sanitize math content robustly WITHOUT double-wrapping.

    - Sanitizes the content of EXISTING $$...$$ blocks in place.
    - \begin{aligned}...\end{aligned}: wraps in $$ only if outside one.
    - \boxed{...}: sanitized, forced multiline, wrapped in $$ only if outside.
    - Also unwraps environments accidentally nested in \text{...}, then runs
      the brace-repair passes over the whole string.
    """
    if not isinstance(s, str) or not s:
        return s
    s = _unwrap_envs_from_text(s)
    # Span bookkeeping; NOTE(review): appended to but never read back here.
    display_blocks = []

    def _sanitize_display(m):
        start, end = m.span()
        content = m.group(1)
        content = _sanitize_inside_math(content)
        display_blocks.append((start, start + 2, end - 2, end))
        return "$$\n" + content + "\n$$"
    # Sanitize every existing $$...$$ block in place.
    s = re.sub(r'\$\$(.*?)\$\$', _sanitize_display, s, flags=re.S)

    def _collect_display_spans_in_text(text: str):
        # Pair up '$$' markers left-to-right into (start, end) spans.
        spans = []
        i3, n3 = 0, len(text)
        while i3 < n3:
            a = text.find('$$', i3)
            if a == -1:
                break
            b = text.find('$$', a + 2)
            if b == -1:
                break
            spans.append((a, b + 2))
            i3 = b + 2
        return spans

    display_spans = _collect_display_spans_in_text(s)

    def _inside_display(idx: int) -> bool:
        # True when idx falls within any $$...$$ span (inclusive bounds).
        for (a, b) in display_spans:
            if a <= idx <= b:
                return True
        return False

    def _collect_boxed_spans(text: str):
        # Brace-match each \boxed{...} (skipping escapes) into (start, end).
        spans = []
        i2, n2 = 0, len(text)
        while i2 < n2:
            j2 = text.find(r'\boxed{', i2)
            if j2 == -1:
                break
            k2 = j2 + len(r'\boxed{'); depth2 = 1
            while k2 < n2 and depth2 > 0:
                c2 = text[k2]
                if c2 == '\\' and k2 + 1 < n2:
                    k2 += 2; continue
                if c2 == '{':
                    depth2 += 1
                elif c2 == '}':
                    depth2 -= 1
                    if depth2 == 0:
                        spans.append((j2, k2))
                        break
                k2 += 1
            i2 = k2 if k2 > j2 else j2 + 1
        return spans

    boxed_spans = _collect_boxed_spans(s)

    def _inside_boxed(idx: int) -> bool:
        for (a, b) in boxed_spans:
            if a <= idx <= b:
                return True
        return False

    def _proc_aligned(m):
        # Wrap an aligned env in $$ only when it isn't already inside math.
        start, _ = m.span(1)
        blk = _sanitize_inside_math(m.group(1))
        return blk if (_inside_display(start) or _inside_boxed(start)) else "$$\n" + blk + "\n$$"
    s = re.sub(r'(\\begin\{aligned\}.*?\\end\{aligned\})', _proc_aligned, s, flags=re.S)
    s = _normalize_escaped_closing_braces_for_single_arg_commands(s, ["textbf", "emph"])
    # Process each \boxed{...}: sanitize + force multiline, wrapping in $$
    # only when the box sits outside an existing display span.
    out = []; i, n = 0, len(s)
    while i < n:
        j = s.find(r'\boxed{', i)
        if j == -1:
            out.append(s[i:]); break
        out.append(s[i:j])
        k = j + len(r'\boxed{'); depth = 1
        while k < n and depth > 0:
            c = s[k]
            if c == '\\' and k + 1 < n:
                k += 2; continue
            if c == '{': depth += 1
            elif c == '}': depth -= 1
            k += 1
        if depth != 0:
            # Unbalanced box: keep the remainder untouched.
            out.append(s[j:]); i = n; break
        inner = s[j+len(r'\boxed{'):k-1]
        inner = _sanitize_inside_math(inner)
        inner = _force_multiline_box_content(inner)
        fixed = r'\boxed{' + inner + '}'
        out.append(fixed if _inside_display(j) else "$$\n" + fixed + "\n$$")
        i = k
    s = "".join(out)
    # Final cleanup and repair passes (order matters: fast regex fixes first,
    # then the scanner-based repairs, text-group-confined before global).
    s = _unwrap_envs_from_text(s)
    s = _cleanup_tex_command_braces_and_percent(s)
    s = _normalize_escaped_closing_braces_for_single_arg_commands(s, ["textbf", "emph", "text"])
    s = _repair_unclosed_single_arg_commands_in_text_groups(s, ["textbf", "emph"])
    s = _repair_unclosed_single_arg_commands(s, ["textbf", "emph", "text"])
    return s
|
|
|
|
|
def _final_response_bubble(text_md: str) -> str:
    """Standalone GREEN bubble for the last/primary narrative response."""
    sanitized = ensure_display_math(text_md)
    # Leading ' \n\n' forces the content to start as a new markdown block.
    body = ' \n\n' + format_math(sanitized).strip()
    return (
        "<div class=\"fathom-bubble\" style=\"background:#e9f8ee;border:1px solid #c8ebd4;"
        "border-radius:12px;padding:14px 16px;margin:10px 0;color:#000;\">"
        "<strong>Final Response</strong>:\n\n"
        "<div class=\"fathom-content\" style=\"color:#000 !important;\">"
        + body
        + "</div>\n\n"
        "</div>"
    )
|
|
|
|
|
def _final_answer_block(answer: Any) -> str:
    r"""Markdown section presenting the final answer inside $$\boxed{...}$$;
    a missing answer renders a '(not available)' placeholder."""
    if answer is None:
        return "**Final Answer**:\n\n_(not available)_\n\n" + SPACER
    rendered = str(answer).strip()
    return "**Final Answer**:\n\n$$\\boxed{" + rendered + "}$$\n\n" + SPACER
|
|
|
|
|
def _remove_progress_bubble_if_any(history: List[Dict[str, str]], |
|
|
progress_bubble_idx: Optional[int]) -> Optional[int]: |
|
|
if progress_bubble_idx is None: |
|
|
return None |
|
|
del history[progress_bubble_idx] |
|
|
return None |
|
|
|
|
|
def _spinner_text_bubble(label: str, frame: str) -> str: |
|
|
"""ASCII spinner frame bubble.""" |
|
|
return ( |
|
|
f"<div class='fathom-bubble' style='display:flex;align-items:center;gap:10px;" |
|
|
f"background:#f3f6fa;border:1px solid #e2e8f0;border-radius:12px;" |
|
|
f"padding:10px 12px;margin:8px 0;color:#000;'>" |
|
|
f"<div style='font-family:monospace;font-weight:700;'>{frame}</div>" |
|
|
f"<div style='color:#000;font-weight:600;'>{label}</div>" |
|
|
f"</div>" |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_response( |
|
|
user_message: str, |
|
|
max_tokens: int, |
|
|
temperature: float, |
|
|
top_p: float, |
|
|
history_state: list, |
|
|
convo_id: str, |
|
|
raw_prompts: dict, |
|
|
deepresearch_on_value: bool, |
|
|
summary_llm: str |
|
|
): |
|
|
if not user_message.strip(): |
|
|
|
|
|
yield history_state, history_state, raw_prompts, "", "" |
|
|
return |
|
|
|
|
|
old_prompt = raw_prompts.get(convo_id) |
|
|
history = history_state + [{"role": "user", "content": user_message}] |
|
|
|
|
|
|
|
|
cluster_idx: Optional[int] = None |
|
|
steps: List[Dict[str, Any]] = [] |
|
|
|
|
|
progress_bubble_idx: Optional[int] = None |
|
|
spinner_frames = ["-", "\\", "|", "/"] |
|
|
spinner_i = 0 |
|
|
spinner_label = "" |
|
|
|
|
|
|
|
|
visited_urls: List[str] = [] |
|
|
visited_set: set = set() |
|
|
search_urls_all: List[str] = [] |
|
|
search_set: set = set() |
|
|
sources_bubble_idx: Optional[int] = None |
|
|
|
|
|
|
|
|
visited_meta_map: Dict[str, Dict[str, str]] = {} |
|
|
search_meta_map: Dict[str, Dict[str, str]] = {} |
|
|
visited_html_str: str = "" |
|
|
search_html_str: str = "" |
|
|
|
|
|
|
|
|
def _truncate_50(s: Any) -> str: |
|
|
t = str(_pretty_json(s)) |
|
|
t = re.sub(r'\s+', ' ', t).strip() |
|
|
return (t[:50] + "…") if len(t) > 50 else t |
|
|
|
|
|
def _card_html(url: str, title: Optional[str], snippet: Optional[str]) -> str: |
|
|
esc_url = html.escape(url, quote=True) |
|
|
esc_title = html.escape((title or url).strip(), quote=True) |
|
|
esc_snip = html.escape((snippet or "").strip(), quote=True) |
|
|
return ( |
|
|
"<div class='source-card' style='background:#fff;border:1px solid #e5e7eb;" |
|
|
"border-radius:12px;padding:10px 12px;margin:8px 0;'>" |
|
|
f"<div style='font-weight:600;'><a href='{esc_url}' target='_blank' rel='noopener noreferrer'>{esc_title}</a></div>" |
|
|
f"<div style='color:#111;margin-top:6px;font-size:1em;'>{_truncate_50(esc_snip)}</div>" |
|
|
"</div>" |
|
|
) |
|
|
|
|
|
def _parse_search_entries(res: Any) -> List[Dict[str, str]]: |
|
|
""" |
|
|
Parse markdown-like search results into list of {url, title, snippet}. |
|
|
Fallback to bare URLs if no rich structure is found. |
|
|
""" |
|
|
text = _pretty_json(res) |
|
|
if not isinstance(text, str): |
|
|
text = str(text) |
|
|
entries: List[Dict[str, str]] = [] |
|
|
|
|
|
pat = re.compile( |
|
|
r'###\s*\d+\.\s*(?P<title>.*?)\s*\n\*\*URL\*\*:\s*(?P<url>\S+)\s*\n\*\*Snippet\*\*:\s*(?P<snip>.*?)(?:\n---|\Z)', |
|
|
re.S |
|
|
) |
|
|
for m in pat.finditer(text): |
|
|
url = (m.group('url') or '').strip() |
|
|
title = (m.group('title') or '').strip() |
|
|
snip = (m.group('snip') or '').strip() |
|
|
if url: |
|
|
entries.append({'url': url, 'title': title, 'snippet': snip}) |
|
|
|
|
|
if not entries: |
|
|
for u in _extract_urls_from_result(res): |
|
|
entries.append({'url': u, 'title': '', 'snippet': ''}) |
|
|
|
|
|
return entries |
|
|
|
|
|
def _extract_query_summary(res: Any) -> str: |
|
|
""" |
|
|
Robustly extract a concise summary from a tool result. |
|
|
|
|
|
Priority: |
|
|
1) If result is a dict, look for common summary-like fields. |
|
|
2) If text contains "Summary:" use the LAST occurrence closest to the end, |
|
|
and return everything after "Summary:" up to the next blank line or next |
|
|
labeled section (e.g., "URL:", "Title:", "---") or end of string. |
|
|
3) Otherwise, fallback to the first 1–2 sentences. |
|
|
|
|
|
Always returns a non-empty string if any meaningful text is present. |
|
|
""" |
|
|
splits = res.split("Summary:") |
|
|
last_part = splits[-1] |
|
|
return last_part.strip() or "(info unavailable)" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _render_sidebar_lists(visited_list: List[str]) -> str: |
|
|
""" |
|
|
Build HTML strings for the visited and search sections as card lists. |
|
|
""" |
|
|
|
|
|
|
|
|
v_parts: List[str] = [] |
|
|
for u in visited_list: |
|
|
meta = visited_meta_map.get(u, {}) |
|
|
v_parts.append(_card_html(u, meta.get('title') or '', meta.get('snippet') or '(fetching…)')) |
|
|
visited_html = "".join(v_parts) or "<div style='color:#555;'>(none yet)</div>" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
return visited_html |
|
|
|
|
|
def _extract_urls_from_result(res: Any) -> List[str]: |
|
|
""" |
|
|
Robustly extract URLs from tool results. |
|
|
Handles: |
|
|
- dicts (look for 'url', 'link', 'href' keys, and scan values) |
|
|
- lists/tuples |
|
|
- strings (prefer '**URL**: https://...' lines; else generic URL regex) |
|
|
Preserves encounter order and uniqueness for this call result. |
|
|
""" |
|
|
found: List[str] = [] |
|
|
seen: set = set() |
|
|
|
|
|
def _add(u: str): |
|
|
if not isinstance(u, str): |
|
|
return |
|
|
u = u.strip() |
|
|
if not u: |
|
|
return |
|
|
|
|
|
u2 = re.sub(r'[)\]\}>,;]+$', '', u) |
|
|
if u2 not in seen: |
|
|
seen.add(u2) |
|
|
found.append(u2) |
|
|
|
|
|
def _walk(x: Any): |
|
|
if x is None: |
|
|
return |
|
|
if isinstance(x, str): |
|
|
s = x |
|
|
|
|
|
for m in re.findall(r'\*\*URL\*\*:\s*(https?://\S+)', s): |
|
|
_add(m) |
|
|
|
|
|
for m in re.findall(r'https?://[^\s<>()\]\}"]+', s): |
|
|
_add(m) |
|
|
elif isinstance(x, dict): |
|
|
|
|
|
for k in ("url", "link", "href"): |
|
|
v = x.get(k) |
|
|
if isinstance(v, str): |
|
|
_add(v) |
|
|
for v in x.values(): |
|
|
_walk(v) |
|
|
elif isinstance(x, (list, tuple, set)): |
|
|
for v in x: |
|
|
_walk(v) |
|
|
else: |
|
|
s = str(x) |
|
|
for m in re.findall(r'https?://[^\s<>()\]\}"]+', s): |
|
|
_add(m) |
|
|
|
|
|
_walk(res) |
|
|
return found |
|
|
|
|
|
|
|
|
def _normalize_url(u: str) -> str: |
|
|
try: |
|
|
s = (u or "").strip() |
|
|
if not s: |
|
|
return "" |
|
|
|
|
|
s = re.sub(r'#.*$', '', s) |
|
|
|
|
|
m = re.match(r'^(https?)://([^/]+)(/.*|$)', s, flags=re.I) |
|
|
if m: |
|
|
scheme = m.group(1).lower() |
|
|
host = m.group(2).lower() |
|
|
host = host[4:] if host.startswith('www.') else host |
|
|
rest = m.group(3) or '' |
|
|
|
|
|
if rest.endswith('/') and len(rest) > 1: |
|
|
rest = rest[:-1] |
|
|
return f"{scheme}://{host}{rest}" |
|
|
|
|
|
if s.endswith('/') and len(s) > 1: |
|
|
s = s[:-1] |
|
|
return s |
|
|
except Exception: |
|
|
return (u or "").strip() |
|
|
|
|
|
def _search_meta_snippet_for_url(search_meta_map: Dict[str, Dict[str, str]], url: str) -> str: |
|
|
"""Find a snippet in search_meta_map using normalized URL comparison.""" |
|
|
try: |
|
|
n = _normalize_url(url) |
|
|
for k, meta in (search_meta_map or {}).items(): |
|
|
if _normalize_url(k) == n: |
|
|
return (meta.get("snippet") or "").strip() |
|
|
except Exception: |
|
|
pass |
|
|
return "" |
|
|
|
|
|
    def _upsert_sources_bubble():
        """
        Repurposed: update the sidebar HTML strings instead of inserting a chat bubble.
        """
        # Rebind the enclosing generator's cached sidebar HTML so the next
        # `yield` pushes the refreshed visited-sources card list to the UI.
        nonlocal visited_html_str

        visited_html_str = _render_sidebar_lists(visited_urls)
|
|
|
|
|
|
|
|
q: Queue = Queue() |
|
|
|
|
|
    def _worker():
        """Background thread body: drive the answer generator and relay every
        (tag, payload) event onto the queue consumed by the UI loop.

        Terminal queue messages: the generator's own "answer" event, a
        ("stop", return_value) on StopIteration, or ("error", message) on
        any other failure — the consumer loop keys off these tags.
        """
        try:
            gen = answer_question_recall(
                question=user_message,
                temperature=float(temperature),
                top_p=float(top_p),
                max_new_tokens=int(max_tokens),
                old_prompt=old_prompt,
                deepresearch_on=deepresearch_on_value,
                summary_llm=summary_llm
            )
            while True:
                # Manual next() (not a for-loop) so StopIteration's return
                # value can be captured in the except clause below.
                tag, out = next(gen)
                q.put((tag, out))
                if tag == "answer":
                    break
        except StopIteration as e:
            # Generator finished without an explicit "answer" event.
            q.put(("stop", getattr(e, "value", None)))
        except Exception as e:
            q.put(("error", str(e)))
|
|
|
|
|
t = threading.Thread(target=_worker, daemon=True) |
|
|
t.start() |
|
|
|
|
|
|
|
|
while True: |
|
|
try: |
|
|
tag, out = q.get(timeout=0.1) |
|
|
|
|
|
progress_bubble_idx = _remove_progress_bubble_if_any(history, progress_bubble_idx) |
|
|
|
|
|
if tag == "assistant_resp": |
|
|
assistant_text, tool_calls = out or ("", []) |
|
|
|
|
|
steps.append({"text": assistant_text, "calls": tool_calls or [], "results": []}) |
|
|
|
|
|
|
|
|
for call in (tool_calls or []): |
|
|
norm = _normalize_tool_call(call) |
|
|
name = (norm.get("name") or "").lower() |
|
|
if name == "query_url": |
|
|
u = str((norm.get("arguments") or {}).get("url", "")).strip() |
|
|
if u and u not in visited_set: |
|
|
visited_set.add(u) |
|
|
visited_urls.append(u) |
|
|
|
|
|
if u not in visited_meta_map: |
|
|
|
|
|
visited_meta_map[u] = {"title": "", "snippet": "(fetching…)"} |
|
|
|
|
|
|
|
|
condensed_html_list = [] |
|
|
for idx, tstep in enumerate(steps): |
|
|
condensed_html_list.append( |
|
|
_condensed_step_block( |
|
|
tstep["text"], |
|
|
tstep["calls"], |
|
|
tstep["results"], |
|
|
|
|
|
last_open=True |
|
|
) |
|
|
) |
|
|
cluster_html = _cluster_wrapper("".join(condensed_html_list).strip() or "_(no steps)_") |
|
|
|
|
|
if cluster_idx is None: |
|
|
history.append({"role": "assistant", "content": cluster_html}) |
|
|
cluster_idx = len(history) - 1 |
|
|
else: |
|
|
history[cluster_idx]["content"] = cluster_html |
|
|
|
|
|
|
|
|
_upsert_sources_bubble() |
|
|
yield history, history, raw_prompts, visited_html_str, search_html_str |
|
|
|
|
|
|
|
|
spinner_label = "Working… fetching results" |
|
|
frame = spinner_frames[spinner_i % len(spinner_frames)] |
|
|
history.append({"role": "assistant", "content": _spinner_text_bubble(spinner_label, frame)}) |
|
|
progress_bubble_idx = len(history) - 1 |
|
|
spinner_i += 1 |
|
|
yield history, history, raw_prompts, visited_html_str, search_html_str |
|
|
|
|
|
elif tag == "tool_results": |
|
|
results_list = out[0] or [] |
|
|
|
|
|
if steps: |
|
|
steps[-1]["results"] = results_list |
|
|
|
|
|
|
|
|
calls = steps[-1].get("calls") or [] |
|
|
for i, call in enumerate(calls): |
|
|
norm = _normalize_tool_call(call) |
|
|
name = (norm.get("name") or "").lower() |
|
|
if name == "query_url": |
|
|
|
|
|
u = str((norm.get("arguments") or {}).get("url", "")).strip() |
|
|
if u and u not in visited_set: |
|
|
visited_set.add(u) |
|
|
visited_urls.append(u) |
|
|
|
|
|
res_i = results_list[i] if i < len(results_list) else None |
|
|
|
|
|
if u: |
|
|
if u not in visited_meta_map: |
|
|
|
|
|
visited_meta_map[u] = {"title": "", "snippet": ""} |
|
|
|
|
|
visited_meta_map[u]["snippet"] = _extract_query_summary(res_i) |
|
|
elif name == "search_urls": |
|
|
res_i = results_list[i] if i < len(results_list) else None |
|
|
|
|
|
for entry in _parse_search_entries(res_i): |
|
|
u = entry.get("url", "").strip() |
|
|
if not u: |
|
|
continue |
|
|
if u not in search_set and u not in visited_set: |
|
|
search_set.add(u) |
|
|
search_urls_all.append(u) |
|
|
search_meta_map[u] = { |
|
|
"title": entry.get("title", "").strip(), |
|
|
"snippet": entry.get("snippet", "").strip(), |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
condensed_html_list = [] |
|
|
for idx, tstep in enumerate(steps): |
|
|
condensed_html_list.append( |
|
|
_condensed_step_block( |
|
|
tstep["text"], |
|
|
tstep["calls"], |
|
|
tstep["results"], |
|
|
|
|
|
last_open=True |
|
|
) |
|
|
) |
|
|
cluster_html = _cluster_wrapper("".join(condensed_html_list).strip()) |
|
|
if cluster_idx is not None: |
|
|
history[cluster_idx]["content"] = cluster_html |
|
|
|
|
|
|
|
|
_upsert_sources_bubble() |
|
|
yield history, history, raw_prompts, visited_html_str, search_html_str |
|
|
|
|
|
|
|
|
spinner_label = "Working… synthesizing LLM response" |
|
|
frame = spinner_frames[spinner_i % len(spinner_frames)] |
|
|
history.append({"role": "assistant", "content": _spinner_text_bubble(spinner_label, frame)}) |
|
|
progress_bubble_idx = len(history) - 1 |
|
|
spinner_i += 1 |
|
|
yield history, history, raw_prompts, visited_html_str, search_html_str |
|
|
|
|
|
elif tag == "end": |
|
|
chat_str = out[0] |
|
|
raw_prompts[convo_id] = chat_str |
|
|
|
|
|
|
|
|
final_resp_html = "" |
|
|
if steps: |
|
|
last = steps[-1] |
|
|
final_resp_html = _final_response_bubble(last["text"]) |
|
|
prev_steps = steps[:-1] |
|
|
else: |
|
|
prev_steps = [] |
|
|
|
|
|
|
|
|
if cluster_idx is not None: |
|
|
if prev_steps: |
|
|
condensed_html_list = [] |
|
|
for tstep in prev_steps: |
|
|
condensed_html_list.append( |
|
|
_condensed_step_block(tstep["text"], tstep["calls"], tstep["results"], last_open=True) |
|
|
) |
|
|
cluster_html = _cluster_wrapper("".join(condensed_html_list).strip()) |
|
|
history[cluster_idx]["content"] = cluster_html |
|
|
else: |
|
|
|
|
|
deleted_index = cluster_idx |
|
|
del history[cluster_idx] |
|
|
cluster_idx = None |
|
|
if sources_bubble_idx is not None and sources_bubble_idx > deleted_index: |
|
|
sources_bubble_idx -= 1 |
|
|
|
|
|
|
|
|
_upsert_sources_bubble() |
|
|
|
|
|
|
|
|
if sources_bubble_idx is not None: |
|
|
insert_at = sources_bubble_idx + 1 |
|
|
elif cluster_idx is not None: |
|
|
insert_at = cluster_idx + 1 |
|
|
else: |
|
|
insert_at = len(history) |
|
|
|
|
|
if final_resp_html: |
|
|
history.insert(insert_at, {"role": "assistant", "content": final_resp_html}) |
|
|
insert_at += 1 |
|
|
|
|
|
|
|
|
history.insert(insert_at, {"role": "assistant", "content": f"{SPACER}✅ Information retrieved\n\n{SPACER}"}) |
|
|
yield history, history, raw_prompts, visited_html_str, search_html_str |
|
|
|
|
|
|
|
|
spinner_label = "" |
|
|
|
|
|
elif tag == "answer": |
|
|
answer = out[0] if out else None |
|
|
final_block = _final_answer_block(answer) |
|
|
yield history, history, raw_prompts, visited_html_str, search_html_str |
|
|
break |
|
|
|
|
|
else: |
|
|
break |
|
|
|
|
|
except Empty: |
|
|
|
|
|
if progress_bubble_idx is not None and spinner_label: |
|
|
frame = spinner_frames[spinner_i % len(spinner_frames)] |
|
|
history[progress_bubble_idx]["content"] = _spinner_text_bubble(spinner_label, frame) |
|
|
spinner_i += 1 |
|
|
yield history, history, raw_prompts, visited_html_str, search_html_str |
|
|
|
|
|
|
|
|
except StopIteration as e: |
|
|
tag, out = e.value if e.value else (None, None) |
|
|
if tag != "answer": |
|
|
break |
|
|
answer = out[0] if out else None |
|
|
final_block = _final_answer_block(answer) |
|
|
yield history, history, raw_prompts, visited_html_str, search_html_str |
|
|
break |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Canned example prompts, keyed by the label shown on each example button in
# the UI below. Values are the full question text inserted into the composer.
example_messages = {
    "DeepResearch on the latest GST policy": (
        "Do a detailed deepresearch on the recent GST rate change, find all major rate changes and tell me about its implications. Find the official government sources and also tell me about how it will affect the general public."
    ),
    "UPSC 2025 Prelims Question": (
        "Among a list of European countries (Austria, Bulgaria, Croatia, Serbia, Sweden, North Macedonia), how many are NATO members?"
    ),
    "DeepResearch on inflammation in treatments for cancer": (
        "Access recent cancer studies that used antibody-drug treatments and do a detailed deepresearch on the studies. Tell me about how long after starting did lung inflammation usually appear (average weeks), and how many out of 100 had a serious cases? Discuss about the possible reasons, results, and conclusions drawn from these studies."
    ),
    "Which Indian math model was post-trained for ~$499?": (
        "Identify the Indian AI lab which reportedly post-trained a math model for about $499 with performance comparable to o3-mini. Give model name, lab name, and brief method summary."
    ),
    "DeepResearch analysis on EV Adoption": (
        "Do an in-depth, data-driven comparison of EV adoption in the US, Europe, China, and India. Cover: (1) Market overview—penetration rates, 5–10 year trends, and key policies/incentives shaping growth. (2) Consumer behavior—attitudes and purchase drivers (environment, TCO, tech). (3) Charging—public/private build-out, accessibility, tech differences, and major initiatives. (4) Automakers—top OEM strategies, production capacity, partnerships, and cross-border competition. (5) Barriers vs. opportunities—price, range, supply chain vs. battery advances, renewables integration, smart mobility. (6) Outlook to 2030—regional growth projections, impacts on global trends and emerging markets. Use specific examples and recent quantitative data"
    )
}
|
|
|
|
|
def pick_random_example():
    """Fill the input box with a randomly chosen example question."""
    try:
        examples = list(example_messages.values())
        return gr.update(value=random.choice(examples))
    except Exception:
        # No examples available: clear the box instead of erroring.
        return gr.update(value="")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _clear_current_conversation(convo_id: str, |
|
|
conversations: Dict[str, Any], |
|
|
raw_prompts: Dict[str, str]): |
|
|
if convo_id in conversations: |
|
|
conversations[convo_id]["messages"] = [] |
|
|
if convo_id in raw_prompts: |
|
|
del raw_prompts[convo_id] |
|
|
return conversations, raw_prompts |
|
|
|
|
|
with gr.Blocks(theme=gr.themes.Soft()) as demo: |
|
|
conversations_state = gr.State({}) |
|
|
current_convo_id = gr.State(generate_conversation_id()) |
|
|
history_state = gr.State([]) |
|
|
raw_prompts_state = gr.State({}) |
|
|
|
|
|
|
|
|
sources_visible = gr.State(False) |
|
|
|
|
|
|
|
|
deepresearch_on = gr.State(True) |
|
|
|
|
|
|
|
|
summary_llm = gr.State(os.environ["SUMMARY_HF_MODEL_URL"]) |
|
|
|
|
|
|
|
|
|
|
|
gr.HTML( |
|
|
""" |
|
|
<style> |
|
|
/* Keep MathML visually hidden (screen-reader only). DO NOT wrap/size this layer. */ |
|
|
/* Global font scale: slightly smaller across the app */ |
|
|
html { font-size: 14px; } |
|
|
body { margin: 0 !important; padding: 0 !important; } |
|
|
.katex .katex-mathml { |
|
|
position: absolute !important; |
|
|
overflow: hidden !important; |
|
|
clip: rect(1px, 1px, 1px, 1px) !important; |
|
|
white-space: nowrap !important; |
|
|
height: 1px !important; |
|
|
width: 1px !important; |
|
|
padding: 0 !important; |
|
|
border: 0 !important; |
|
|
margin: 0 !important; |
|
|
} |
|
|
|
|
|
/* Work only on the visible HTML layer */ |
|
|
.katex-display { |
|
|
text-align: left; |
|
|
overflow-x: auto; /* scroll only if truly needed */ |
|
|
overflow-y: hidden; |
|
|
margin: 0.5rem 0; |
|
|
} |
|
|
.katex-display > .katex, |
|
|
.katex .katex-html { |
|
|
white-space: normal !important; /* enable wrapping of long \text{...} etc. */ |
|
|
word-break: break-word; |
|
|
overflow-wrap: anywhere; |
|
|
} |
|
|
|
|
|
/* A sane line-height so glyphs don't overlap or clip */ |
|
|
.katex, |
|
|
.katex .katex-html { |
|
|
line-height: 1.2 !important; |
|
|
} |
|
|
|
|
|
/* Slight padding for boxed math so it doesn't feel cramped */ |
|
|
.katex .fbox { padding: 4px 6px; } |
|
|
|
|
|
/* High-contrast: force bubble/content text to black, except pre/code (they get a light theme) */ |
|
|
.fathom-bubble, |
|
|
.fathom-bubble *:not(pre):not(code), |
|
|
.fathom-bubble details > summary, |
|
|
.fathom-bubble strong, |
|
|
.fathom-bubble a { color: #000 !important; } |
|
|
|
|
|
/* Ensure numbered/bullet markers are also black */ |
|
|
.fathom-bubble li::marker { color: #000 !important; } |
|
|
|
|
|
/* Narrative containers used for assistant/final content */ |
|
|
.fathom-content, |
|
|
.fathom-content *:not(pre):not(code) { color: #000 !important; } |
|
|
.fathom-content li::marker { color: #000 !important; } |
|
|
|
|
|
.fathom-bubble a { color: #000 !important; text-decoration: underline; } |
|
|
|
|
|
/* Code blocks: light theme to avoid black text on dark gray in dark mode */ |
|
|
.fathom-bubble pre, |
|
|
.fathom-bubble code { |
|
|
background: #f6f8fa !important; |
|
|
color: #111 !important; |
|
|
} |
|
|
.fathom-bubble pre { |
|
|
border: 1px solid #e5e7eb; |
|
|
border-radius: 8px; |
|
|
padding: 10px 12px; |
|
|
overflow-x: auto; |
|
|
} |
|
|
.fathom-bubble code { |
|
|
padding: 2px 4px; |
|
|
border-radius: 4px; |
|
|
} |
|
|
|
|
|
/* Ensure KaTeX text also stays black */ |
|
|
.fathom-content .katex, |
|
|
.fathom-content .katex * { color: #000 !important; } |
|
|
|
|
|
/* Pull chat to the very top by removing default top gaps */ |
|
|
.gradio-container { padding-top: 0 !important; } |
|
|
.gradio-container .gr-block, |
|
|
.gradio-container .gr-column, |
|
|
.gradio-container .gr-row { margin-top: 0 !important; } |
|
|
#right_col { margin-top: -30px !important; padding-top: 0 !important; } |
|
|
#main_chat { margin-top: 0 !important; } |
|
|
#main_chat .label { margin-top: 0 !important; } |
|
|
|
|
|
/* Sources cards link color */ |
|
|
.source-card a { color: #000 !important; text-decoration: underline; } |
|
|
/* Global font-size overrides to shrink typography app-wide */ |
|
|
:root, html, body, .gradio-container { font-size: 12px !important; } |
|
|
|
|
|
/* Shrink common UI elements using rem so it doesn't cascade multiply */ |
|
|
.gradio-container, |
|
|
.gradio-container .prose, |
|
|
.gradio-container .label, |
|
|
.gradio-container button, |
|
|
.gradio-container input, |
|
|
.gradio-container textarea, |
|
|
.gradio-container select, |
|
|
.gradio-container .tab-nav, |
|
|
.gradio-container .tabitem, |
|
|
.gradio-container .gradio-html, |
|
|
.gradio-container .gradio-markdown, |
|
|
#main_chat, |
|
|
#main_chat *:not(pre):not(code), |
|
|
#sources_sidebar, |
|
|
#sources_sidebar *:not(pre):not(code), |
|
|
.fathom-bubble, |
|
|
.fathom-bubble *:not(pre):not(code), |
|
|
.fathom-content, |
|
|
.fathom-content *:not(pre):not(code), |
|
|
.source-card, |
|
|
.source-card *:not(pre):not(code) { |
|
|
font-size: 1rem !important; |
|
|
} |
|
|
/* Radio fonts smaller */ |
|
|
#conv_selector, #conv_selector * { font-size: 1rem !important; } |
|
|
#slider, #slider * { font-size: 1rem !important; } |
|
|
.gradio-container [role="radiogroup"] label, |
|
|
.gradio-container input[type="radio"] + label { |
|
|
font-size: 1rem !important; |
|
|
} |
|
|
|
|
|
/* Headings and labels smaller */ |
|
|
.gradio-container h1 { font-size: 1.5rem !important; } |
|
|
.gradio-container h2 { font-size: 1.25rem !important; } |
|
|
.gradio-container h3 { font-size: 1rem !important; } |
|
|
.gradio-container .label, |
|
|
.gradio-container .gr-input-label, |
|
|
.gradio-container .block-label { font-size: 1rem !important; } |
|
|
|
|
|
/* Button sizing and layout: single-row compact triple buttons */ |
|
|
/* --- Responsive composer that matches chat width --- */ |
|
|
#composer_row { |
|
|
display: flex; |
|
|
flex-wrap: wrap; /* allow wrapping on small screens */ |
|
|
gap: 8px; |
|
|
align-items: stretch; |
|
|
} |
|
|
|
|
|
/* Let the input and the controls share the row and wrap when needed */ |
|
|
#composer_input { flex: 1 1 480px; min-width: 0; } |
|
|
#controls_col { flex: 1 1 240px; min-width: 0; display: flex; flex-direction: column; gap: 6px; } |
|
|
|
|
|
/* 2×2 grid; only this gap applies */ |
|
|
#buttons_grid { |
|
|
display: grid !important; |
|
|
grid-template-columns: repeat(2, minmax(0, 1fr)); |
|
|
gap: 6px; /* ← your only spacing */ |
|
|
margin: 0 !important; /* no extra vertical space */ |
|
|
padding: 0 !important; |
|
|
} |
|
|
#buttons_grid button { width: 100%; } |
|
|
|
|
|
/* Two equal-width buttons per row; allow them to shrink and wrap text if needed */ |
|
|
#buttons_row_top, |
|
|
#buttons_row_bottom { |
|
|
display: flex; |
|
|
gap: 6px; |
|
|
} |
|
|
|
|
|
#buttons_row_top > *, |
|
|
#buttons_row_bottom > * { |
|
|
flex: 1 1 0; /* equal widths without hard max-widths */ |
|
|
min-width: 0; /* allow shrinking instead of overflowing */ |
|
|
} |
|
|
|
|
|
/* Make the actual <button> fill its component's width */ |
|
|
#controls_col button { width: 100%; } |
|
|
|
|
|
/* On narrow screens, stack input above the controls so nothing overflows */ |
|
|
@media (max-width: 820px) { |
|
|
#composer_input, |
|
|
#controls_col { flex-basis: 100% !important; } |
|
|
} |
|
|
|
|
|
/* Expand center chat area and compact Sources sidebar bubbles */ |
|
|
.gradio-container { max-width: 100% !important; } |
|
|
#right_col { width: auto !important; max-width: none !important; } |
|
|
#sources_sidebar .source-card { |
|
|
width: 100% !important; |
|
|
box-sizing: border-box !important; |
|
|
padding: 8px 10px !important; |
|
|
margin: 6px 0 !important; |
|
|
} |
|
|
#sources_sidebar .source-card *:not(pre):not(code) { |
|
|
font-size: 0.95rem !important; |
|
|
} |
|
|
|
|
|
</style> |
|
|
""" |
|
|
) |
|
|
|
|
|
with gr.Sidebar(width=270, open=False): |
|
|
gr.HTML(""" |
|
|
<div style="display:flex;flex-direction:column;align-items:flex-start;gap:8px;margin:0 0 0.5em 0;"> |
|
|
<div style="background-color:black;padding:6px;border-radius:8px;"> |
|
|
<img src="https://framerusercontent.com/images/j0KjQQyrUfkFw4NwSaxQOLAoBU.png" alt="Fractal AI Logo" style="height:48px;"> |
|
|
</div> |
|
|
<div style="display:flex;flex-direction:row;gap:10px !important;padding:6px;border-radius:8px;"> |
|
|
<a href="https://github.com/FractalAIResearchLabs/Fathom-Search-4B"> |
|
|
<svg style="border-radius:10px;padding:8px;background-color:white;" display="inline" height="48" aria-hidden="true" viewBox="0 0 24 24" version="1.1" width="48" data-view-component="true"> |
|
|
<path display="inline" d="M12 1C5.923 1 1 5.923 1 12c0 4.867 3.149 8.979 7.521 10.436.55.096.756-.233.756-.522 0-.262-.013-1.128-.013-2.049-2.764.509-3.479-.674-3.699-1.292-.124-.317-.66-1.293-1.127-1.554-.385-.207-.936-.715-.014-.729.866-.014 1.485.797 1.691 1.128.99 1.663 2.571 1.196 3.204.907.096-.715.385-1.196.701-1.471-2.448-.275-5.005-1.224-5.005-5.432 0-1.196.426-2.186 1.128-2.956-.111-.275-.496-1.402.11-2.915 0 0 .921-.288 3.024 1.128a10.193 10.193 0 0 1 2.75-.371c.936 0 1.871.123 2.75.371 2.104-1.43 3.025-1.128 3.025-1.128.605 1.513.221 2.64.111 2.915.701.77 1.127 1.747 1.127 2.956 0 4.222-2.571 5.157-5.019 5.432.399.344.743 1.004.743 2.035 0 1.471-.014 2.654-.014 3.025 0 .289.206.632.756.522C19.851 20.979 23 16.854 23 12c0-6.077-4.922-11-11-11Z"></path> |
|
|
</svg> |
|
|
</a> |
|
|
<a href="https://huggingface.co/FractalAIResearch/Fathom-Search-4B"> |
|
|
<img style="border-radius:10px;padding:8px;background-color:white;" display="inline" alt="Hugging Face's logo" height="48" width="48" src="https://huggingface.co/front/assets/huggingface_logo-noborder.svg"> |
|
|
</a> |
|
|
</div> |
|
|
<h2 style="margin:0">Fathom-Search-4B + Fathom-Synthesizer-4B</h2> |
|
|
</div> |
|
|
""") |
|
|
gr.Markdown("## 💬 Conversations") |
|
|
conversation_selector = gr.Radio(choices=[], label="Select Conversation", interactive=True, elem_id="conv_selector") |
|
|
new_convo_button = gr.Button("New Conversation ➕") |
|
|
|
|
|
gr.Markdown("## ⚙️ Settings") |
|
|
max_tokens_slider = gr.Slider(minimum=8000, maximum=40000, step=1000, value=40000, label="Max Tokens", elem_id="slider") |
|
|
with gr.Accordion("Advanced Settings", open=False): |
|
|
temperature_slider = gr.Slider(minimum=0.0, maximum=2.0, value=0.6, label="Temperature", elem_id="slider") |
|
|
top_p_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, label="Top-p", elem_id="slider") |
|
|
|
|
|
gr.Markdown("Note: Closing this demo window will clear saved conversations.") |
|
|
gr.Markdown(""" |
|
|
We sincerely acknowledge [VIDraft](https://huggingface.co/VIDraft) for their Phi 4 Reasoning Plus [space](https://huggingface.co/spaces/VIDraft/phi-4-reasoning-plus), which inspired parts of this demo UI. |
|
|
""") |
|
|
|
|
|
|
|
|
with gr.Sidebar(open=False, visible=False, width=240, elem_id="sources_sidebar", position="right") as sources_sidebar: |
|
|
gr.Markdown("# Sources") |
|
|
visited_sources_html = gr.HTML("<div style='color:#555;'>(none yet)</div>") |
|
|
|
|
|
search_sources_html = gr.HTML("", visible=False) |
|
|
|
|
|
with gr.Row(): |
|
|
with gr.Column(elem_id="right_col"): |
|
|
chatbot = gr.Chatbot(label="Chat", type="messages", height=520, elem_id="main_chat", autoscroll=False) |
|
|
with gr.Row(elem_id="composer_row", equal_height=True): |
|
|
user_input = gr.Textbox(label="User Input", placeholder="Ask a web question…", lines=1, scale=5, min_width=280, elem_id="composer_input") |
|
|
with gr.Column(scale=2, min_width=220): |
|
|
with gr.Row(elem_id="buttons_grid"): |
|
|
submit_button = gr.Button("Send", variant="primary") |
|
|
clear_button = gr.Button("Clear") |
|
|
search_button = gr.Button("DeepResearch ON", variant="huggingface") |
|
|
sources_button = gr.Button("Sources") |
|
|
|
|
|
gr.Markdown("### Example questions") |
|
|
with gr.Row(): |
|
|
example1_button = gr.Button("DeepResearch on the latest GST policy", scale=1) |
|
|
example2_button = gr.Button("UPSC 2025 Prelims Question", scale=1) |
|
|
example3_button = gr.Button("DeepResearch on inflammation in treatments for cancer", scale=1) |
|
|
example4_button = gr.Button("Which Indian math model was post-trained for ~$499?", scale=1) |
|
|
example5_button = gr.Button("DeepResearch analysis on EV Adoption", scale=1) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def update_conversation_list(conversations): |
|
|
return [conversations[cid]["title"] for cid in conversations] |
|
|
|
|
|
def toggle_sources(is_open): |
|
|
return gr.update(open=not is_open), (not is_open) |
|
|
|
|
|
def toggle_search(is_on, convo_id, conversations): |
|
|
if convo_id in conversations: |
|
|
conversations[convo_id]["deepresearch_on"] = not is_on |
|
|
return gr.update(value = "DeepResearch OFF" if is_on else "DeepResearch ON", variant="secondary" if is_on else "huggingface"), (not is_on), conversations |
|
|
|
|
|
def render_visited_html(urls, meta=None, url_trunc=72, summary_trunc=100): |
|
|
if not urls: |
|
|
return "<div style='color:#555;'>(none yet)</div>" |
|
|
cards = [] |
|
|
meta = meta or {} |
|
|
for u in urls: |
|
|
href_url = u |
|
|
|
|
|
disp = u if len(u) <= url_trunc else (u[:url_trunc] + "…") |
|
|
esc_disp = html.escape(disp, quote=True) |
|
|
esc_href = html.escape(href_url, quote=True) |
|
|
|
|
|
|
|
|
raw_snip = (meta.get(u) or "").strip() |
|
|
snip_trunc = raw_snip if len(raw_snip) <= summary_trunc else (raw_snip[:summary_trunc] + "…") |
|
|
esc_snip = html.escape(snip_trunc, quote=True) |
|
|
|
|
|
cards.append( |
|
|
"<div class='source-card' style='background:#fff;border:1px solid #e5e7eb;" |
|
|
"border-radius:12px;padding:10px 12px;margin:8px 0;'>" |
|
|
f"<div style='font-weight:600;'><a href='{esc_href}' target='_blank' rel='noopener noreferrer'>{esc_disp}</a></div>" |
|
|
f"<div style='color:#111;margin-top:6px;font-size:1em;'>{esc_snip}</div>" |
|
|
"</div>" |
|
|
) |
|
|
return "".join(cards) |
|
|
|
|
|
def extract_urls_from_html(html_str): |
|
|
if not isinstance(html_str, str): |
|
|
return [] |
|
|
|
|
|
return re.findall(r"href=(?:'|\")(https?://[^'\\\"]+)(?:'|\")", html_str) |
|
|
|
|
|
def extract_url_snippets_from_visited_html(html_str): |
|
|
""" |
|
|
Parse visited_sources_html (cards) and extract [(url, snippet)] pairs. |
|
|
""" |
|
|
pairs = [] |
|
|
if not isinstance(html_str, str) or not html_str.strip(): |
|
|
return pairs |
|
|
card_pat = re.compile( |
|
|
r"<div class='source-card'.*?>.*?<a href='(https?://[^']+)'.*?</a>.*?<div[^>]*>(.*?)</div>.*?</div>", |
|
|
re.S |
|
|
) |
|
|
for m in card_pat.finditer(html_str): |
|
|
url = (m.group(1) or "").strip() |
|
|
snippet_raw = (m.group(2) or "").strip() |
|
|
|
|
|
snippet = re.sub(r"<[^>]+>", "", snippet_raw) |
|
|
if url: |
|
|
pairs.append((url, snippet)) |
|
|
return pairs |
|
|
|
|
|
def open_sources(vis):
    """Force the sources sidebar open.

    `vis` (the current visibility flag) is accepted to match the event
    signature but is ignored; the sidebar is opened unconditionally and the
    visibility flag is set to True.
    """
    sidebar_update = gr.update(open=True)
    return sidebar_update, True
|
|
|
|
|
|
|
|
def mark_sources_open():
    """Record that the sources sidebar is open (sidebar `expand` callback)."""
    return True
|
|
|
|
|
def mark_sources_closed():
    """Record that the sources sidebar is closed (sidebar `collapse` callback)."""
    return False
|
|
|
|
|
def get_visited_html_for_selected(selected_title, conversations):
    """Render the visited-sources panel for the conversation titled
    `selected_title`; falls back to the empty-state placeholder when no
    conversation matches."""
    for convo in conversations.values():
        if convo["title"] == selected_title:
            sources = convo.get("sources") or {}
            return render_visited_html(
                sources.get("visited", []),
                meta=sources.get("meta", {}),
            )
    # No matching conversation: show "(none yet)".
    return render_visited_html([])
|
|
|
|
|
def start_new_conversation(conversations):
    """Create an empty conversation, register it, and select it in the UI.

    Returns (convo_id, history, selector update, conversations, search-button
    update, deepresearch flag). New conversations start with DeepResearch on.
    """
    convo_id = generate_conversation_id()
    record = {
        "title": f"New Conversation {convo_id}",
        "messages": [],
        "sources": {"visited": [], "meta": {}},
        "deepresearch_on": True,
    }
    conversations[convo_id] = record

    selector_update = gr.update(
        choices=update_conversation_list(conversations),
        value=record["title"],
    )
    return (
        convo_id,
        [],
        selector_update,
        conversations,
        gr.update(value="DeepResearch ON", variant="huggingface"),
        True,
    )
|
|
|
|
|
def load_conversation(selected_title, conversations):
    """Switch the UI to the conversation whose title is `selected_title`.

    Returns (convo_id, history_state value, chatbot value, search-button
    update, deepresearch flag). When no conversation matches, stays on the
    currently selected conversation.
    """
    for cid, convo in conversations.items():
        if convo["title"] == selected_title:
            dr_on = convo["deepresearch_on"]
            return (
                cid,
                convo["messages"],
                convo["messages"],
                gr.update(
                    value="DeepResearch ON" if dr_on else "DeepResearch OFF",
                    variant="huggingface" if dr_on else "secondary",
                ),
                dr_on,
            )
    # Fallback: no conversation carries this title. The previous code indexed
    # history_state["deepresearch_on"], but history_state holds the message
    # list (its .value is returned as the history two items earlier), so that
    # subscript raised TypeError at runtime. Read the flag from the current
    # conversation's record instead, defaulting to DeepResearch on.
    dr_on = conversations.get(current_convo_id.value, {}).get("deepresearch_on", True)
    return (
        current_convo_id.value,
        history_state.value,
        history_state.value,
        gr.update(
            value="DeepResearch ON" if dr_on else "DeepResearch OFF",
            variant="huggingface" if dr_on else "secondary",
        ),
        dr_on,
    )
|
|
|
|
|
def _unique_title_for(user_message, conversations):
    """Build a conversation title from the first five words of `user_message`,
    appending random conversation ids until it clashes with no existing title.
    (Extracted: this logic was duplicated verbatim inside send_message.)"""
    title = " ".join(user_message.strip().split()[:5])
    similar_titles = []
    for conver_id in conversations:
        if title in conversations[conver_id]["title"]:
            similar_titles.append(conversations[conver_id]["title"])
    while title in similar_titles:
        title += f" {generate_conversation_id()}"
    return title


def send_message(user_message, max_tokens, temperature, top_p, convo_id, history, conversations, raw_prompts, deepresearch_on_value, summary_llm):
    """Run one chat turn and stream UI updates while the answer is generated.

    Yields 7-tuples matching the submit_button.click outputs:
    (chatbot history, history_state, conversation-selector update,
     conversations dict, raw prompts, visited-sources HTML update,
     search-sources HTML update).
    """
    # First turn of an unseen conversation: create its record, titled from
    # the user's message (deduplicated against existing titles).
    if convo_id not in conversations:
        conversations[convo_id] = {
            "title": _unique_title_for(user_message, conversations),
            "messages": history,
            "sources": {"visited": [], "meta": {}},
            "deepresearch_on": deepresearch_on_value,
        }

    # Guarantee the sources record exists with both sub-keys, without
    # clobbering anything already collected.
    if "sources" not in conversations[convo_id]:
        conversations[convo_id]["sources"] = {"visited": [], "meta": {}}
    else:
        conversations[convo_id]["sources"].setdefault("visited", [])
        conversations[convo_id]["sources"].setdefault("meta", {})

    # Replace the placeholder "New Conversation ..." title on first message.
    if conversations[convo_id]["title"].startswith("New Conversation"):
        conversations[convo_id]["title"] = _unique_title_for(user_message, conversations)

    # "sources" is guaranteed present by the guard above, so no try/except
    # is needed here (the previous defensive block was dead code).
    conv_sources = conversations[convo_id]["sources"]
    visited_union_html = render_visited_html(conv_sources.get("visited", []), meta=conv_sources.get("meta", {}))

    # Initial yield: surface the user's turn and current sources immediately,
    # before generation starts.
    yield (
        history,
        history,
        gr.update(choices=update_conversation_list(conversations), value=conversations[convo_id]["title"]),
        conversations,
        raw_prompts,
        gr.update(value=visited_union_html),
        gr.update(value=""),
    )

    for updated_history, new_history, new_raw_prompts, visited_html, search_html in generate_response(
        user_message, max_tokens, temperature, top_p, history, convo_id, raw_prompts, deepresearch_on_value, summary_llm
    ):
        conversations[convo_id]["messages"] = new_history
        raw_prompts = new_raw_prompts

        conv_sources = conversations[convo_id]["sources"]
        visited_list = conv_sources["visited"]
        meta_map = conv_sources["meta"]

        # Union newly visited URLs into the conversation-wide list, keeping
        # first-seen order.
        for u in extract_urls_from_html(visited_html):
            if u not in visited_list:
                visited_list.append(u)

        # Search-result snippets only fill gaps (or replace the fetching stub);
        # they never overwrite a real snippet.
        for u, snip in extract_url_snippets_from_visited_html(search_html):
            snip = (snip or "").strip()
            if not snip:
                continue
            existing = (meta_map.get(u) or "").strip()
            if not existing or existing == "(fetching…)":
                meta_map[u] = snip

        # Visited-page snippets are authoritative: they replace stale or
        # differing text, but a "(fetching…)" stub never downgrades a real
        # snippet that is already stored.
        for u, snip in extract_url_snippets_from_visited_html(visited_html):
            snip = (snip or "").strip()
            if not snip:
                continue
            existing = (meta_map.get(u) or "").strip()
            if snip == "(fetching…)":
                if not existing:
                    meta_map[u] = snip
            else:
                if not existing or existing == "(fetching…)" or snip != existing:
                    meta_map[u] = snip

        visited_union_html = render_visited_html(visited_list, meta=meta_map)

        yield (
            updated_history,
            new_history,
            gr.update(choices=update_conversation_list(conversations), value=conversations[convo_id]["title"]),
            conversations,
            raw_prompts,
            gr.update(value=visited_union_html),
            gr.update(value=""),
        )
|
|
|
|
|
def clear_current(user_convo_id, conversations, raw_prompts):
    """Wipe the current conversation's messages, prompts, and collected
    sources, and reset the chat pane plus both source panels.

    Returns the 7-tuple expected by clear_button.click's outputs.
    """
    conversations, raw_prompts = _clear_current_conversation(user_convo_id, conversations, raw_prompts)

    if user_convo_id in conversations:
        # Reset the source collections in place (other references to this
        # sources dict must observe the cleared state).
        sources = conversations[user_convo_id].setdefault("sources", {"visited": [], "meta": {}})
        sources["visited"] = []
        sources["meta"] = {}

    kept_title = conversations.get(user_convo_id, {}).get("title", "")
    return (
        [],
        [],
        gr.update(choices=update_conversation_list(conversations), value=kept_title),
        conversations,
        raw_prompts,
        gr.update(value=""),
        gr.update(value=""),
    )
|
|
|
|
|
# --- Event wiring -----------------------------------------------------------

# Submit: stream send_message's updates into the chat pane, conversation
# selector, and both source panels; the chained .then clears the input box.
submit_button.click(
    fn=send_message,
    inputs=[user_input, max_tokens_slider, temperature_slider, top_p_slider, current_convo_id, history_state, conversations_state, raw_prompts_state, deepresearch_on, summary_llm],
    outputs=[chatbot, history_state, conversation_selector, conversations_state, raw_prompts_state, visited_sources_html, search_sources_html],
    concurrency_limit=16
).then(
    fn=lambda: gr.update(value=""),
    inputs=None,
    outputs=user_input
)

# Clear: wipe the current conversation (messages, prompts, sources) and reset
# the chat pane plus both source panels.
clear_button.click(
    fn=clear_current,
    inputs=[current_convo_id, conversations_state, raw_prompts_state],
    outputs=[chatbot, history_state, conversation_selector, conversations_state, raw_prompts_state, visited_sources_html, search_sources_html]
)

# New conversation: create and select an empty conversation, then blank the
# visited-sources panel.
new_convo_button.click(
    fn=start_new_conversation,
    inputs=[conversations_state],
    outputs=[current_convo_id, history_state, conversation_selector, conversations_state, search_button, deepresearch_on]
).then(
    fn=lambda: gr.update(value=""),
    inputs=None,
    outputs=visited_sources_html
)

# Selector: load the chosen conversation, then re-render its visited sources.
conversation_selector.change(
    fn=load_conversation,
    inputs=[conversation_selector, conversations_state],
    outputs=[current_convo_id, history_state, chatbot, search_button, deepresearch_on]
).then(
    fn=get_visited_html_for_selected,
    inputs=[conversation_selector, conversations_state],
    outputs=visited_sources_html
)

# Example buttons: each one pre-fills the input box with a canned prompt
# looked up by its exact key in example_messages.
example1_button.click(fn=lambda: gr.update(value=example_messages["DeepResearch on the latest GST policy"]), inputs=None, outputs=user_input)

example2_button.click(fn=lambda: gr.update(value=example_messages["UPSC 2025 Prelims Question"]), inputs=None, outputs=user_input)

example3_button.click(fn=lambda: gr.update(value=example_messages["DeepResearch on inflammation in treatments for cancer"]), inputs=None, outputs=user_input)

example4_button.click(fn=lambda: gr.update(value=example_messages["Which Indian math model was post-trained for ~$499?"]), inputs=None, outputs=user_input)

example5_button.click(fn=lambda: gr.update(value=example_messages["DeepResearch analysis on EV Adoption"]), inputs=None, outputs=user_input)

# On page load, seed the input box with a random example prompt.
demo.load(fn=pick_random_example, inputs=None, outputs=user_input)

# Keep the sources_visible flag in sync with the sidebar's actual state when
# the user expands/collapses it directly.
sources_sidebar.expand(
    fn=mark_sources_open,
    inputs=None,
    outputs=[sources_visible]
)

sources_sidebar.collapse(
    fn=mark_sources_closed,
    inputs=None,
    outputs=[sources_visible]
)

# Sources button toggles the sidebar open/closed.
sources_button.click(
    fn=toggle_sources,
    inputs=[sources_visible],
    outputs=[sources_sidebar, sources_visible]
)

# DeepResearch button toggles web search for the current conversation and
# persists the flag into conversations_state.
search_button.click(
    fn=toggle_search,
    inputs=[deepresearch_on, current_convo_id, conversations_state],
    outputs=[search_button, deepresearch_on, conversations_state]
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # queue() enables streamed/concurrent event handling (required for the
    # generator-based send_message); share=True exposes a public Gradio link;
    # ssr_mode=False disables server-side rendering.
    demo.queue().launch(share=True, ssr_mode=False)