﻿import os
import sys
import urllib.parse
from typing import Dict, List, Optional

from fastapi import FastAPI, Request, Form
from fastapi.responses import HTMLResponse, StreamingResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
import subprocess
import threading
import uuid
import json
import re

try:
    import google.generativeai as genai  # type: ignore
except Exception:
    genai = None


app = FastAPI(title="Auto Blog Pipeline UI")

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TEMPLATE_DIR = os.path.join(BASE_DIR, "templates")
STATIC_DIR = os.path.join(BASE_DIR, "static")
# Create both asset directories up front: StaticFiles raises a RuntimeError
# at mount time if its directory does not exist.
os.makedirs(TEMPLATE_DIR, exist_ok=True)
os.makedirs(STATIC_DIR, exist_ok=True)

app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
templates = Jinja2Templates(directory=TEMPLATE_DIR)

# Load environment variables for this web process so Gemini helpers can work.
# python-dotenv is optional; skip silently when it is not installed.
try:
    from dotenv import load_dotenv  # type: ignore
    load_dotenv(os.path.join(BASE_DIR, ".env"))
except Exception:
    pass


# In-memory storage for batch tasks (ephemeral within process lifecycle)
BATCH_TASKS: dict[str, dict] = {}

# Default WeCom webhook; can be overridden by env var WECOM_WEBHOOK.
# SECURITY(review): this embeds a live-looking webhook key in source control —
# rotate the key and move the value entirely into environment configuration.
DEFAULT_WECOM_WEBHOOK = "https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=a61d19e4-6506-4fbe-a7a1-2cd0b9815c61"


@app.get("/", response_class=HTMLResponse)
def index(request: Request):
    """Render the landing page with the task submission form."""
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)


@app.post("/run", response_class=HTMLResponse)
def run_task(
    request: Request,
    query: str = Form("") ,
    theme: str = Form(""),
    browser: str = Form("chromium"),
    skip_crawl: Optional[bool] = Form(False),
    max_links: Optional[int] = Form(None),
    start: Optional[int] = Form(None),
    end: Optional[int] = Form(None),
    crawler_timeout: Optional[int] = Form(30),
    crawler_settle: Optional[float] = Form(0.5),
    crawler_workers: Optional[int] = Form(1),
    crawler_headless: Optional[bool] = Form(False),
    crawler_page_load_strategy: Optional[str] = Form("normal"),
    crawler_user_data_dir: Optional[str] = Form(None),
    crawler_raw_html_out: Optional[str] = Form(None),
    compose_direct: Optional[bool] = Form(False),
    wecom_send: Optional[bool] = Form(False),
    wecom_webhook: Optional[str] = Form(None),
):
    """Collect form inputs into a querystring and render the SSE watch page.

    The watch page opens an EventSource against /stream with these params.
    """
    params: Dict[str, str] = {}

    def _put_text(key: str, value) -> None:
        # Forward non-empty string options verbatim.
        if value:
            params[key] = value

    def _put_flag(key: str, enabled) -> None:
        # Boolean switches are encoded as "1" when set.
        if enabled:
            params[key] = "1"

    def _put_number(key: str, value) -> None:
        # Numeric knobs are forwarded whenever a value was supplied.
        if value is not None:
            params[key] = str(value)

    # Insertion order matches the legacy layout of the querystring.
    _put_text("query", query)
    _put_text("theme", theme)
    _put_text("browser", browser)
    _put_flag("skip_crawl", skip_crawl)
    _put_flag("compose_direct", compose_direct)
    _put_number("max_links", max_links)
    _put_number("start", start)
    _put_number("end", end)
    _put_number("crawler_timeout", crawler_timeout)
    _put_number("crawler_settle", crawler_settle)
    _put_number("crawler_workers", crawler_workers)
    _put_flag("crawler_headless", crawler_headless)
    _put_text("crawler_page_load_strategy", crawler_page_load_strategy)
    _put_text("crawler_user_data_dir", crawler_user_data_dir)
    _put_text("crawler_raw_html_out", crawler_raw_html_out)
    _put_flag("wecom_send", wecom_send)
    _put_text("wecom_webhook", wecom_webhook)

    qs = urllib.parse.urlencode(params)
    context = {"request": request, "qs": qs, "params": params}
    return templates.TemplateResponse("watch.html", context)


@app.post("/run-batch", response_class=HTMLResponse)
def run_batch(
    request: Request,
    themes: str = Form(""),
    use_gemini: Optional[bool] = Form(False),
    # reuse knobs
    browser: str = Form("chromium"),
    skip_crawl: Optional[bool] = Form(False),
    compose_direct: Optional[bool] = Form(False),
    max_links: Optional[int] = Form(None),
    start: Optional[int] = Form(None),
    end: Optional[int] = Form(None),
    crawler_timeout: Optional[int] = Form(30),
    crawler_settle: Optional[float] = Form(0.5),
    crawler_workers: Optional[int] = Form(1),
    crawler_headless: Optional[bool] = Form(False),
    crawler_page_load_strategy: Optional[str] = Form("normal"),
    crawler_user_data_dir: Optional[str] = Form(None),
    crawler_raw_html_out: Optional[str] = Form(None),
    wecom_send: Optional[bool] = Form(False),
    wecom_webhook: Optional[str] = Form(None),
):
    """Register a batch of themes (one per line) and render the batch watch page.

    The task payload is stored in the in-process BATCH_TASKS registry and is
    later consumed by the /stream-batch SSE endpoint.
    """
    theme_lines = [line.strip() for line in (themes or "").splitlines() if line.strip()]
    if not theme_lines:
        error_ctx = {"request": request, "error": "Please enter at least one theme"}
        return templates.TemplateResponse("index.html", error_ctx, status_code=400)

    # Short random id; collisions are effectively impossible for a UI session.
    task_id = uuid.uuid4().hex[:12]
    opts = {
        "browser": browser,
        "skip_crawl": bool(skip_crawl),
        "compose_direct": bool(compose_direct),
        "max_links": max_links,
        "start": start,
        "end": end,
        "crawler_timeout": crawler_timeout,
        "crawler_settle": crawler_settle,
        "crawler_workers": crawler_workers,
        "crawler_headless": bool(crawler_headless),
        "crawler_page_load_strategy": crawler_page_load_strategy,
        "crawler_user_data_dir": crawler_user_data_dir,
        "crawler_raw_html_out": crawler_raw_html_out,
        "wecom_send": bool(wecom_send),
        "wecom_webhook": wecom_webhook,
    }
    BATCH_TASKS[task_id] = {
        "themes": theme_lines,
        "use_gemini": bool(use_gemini),
        "opts": opts,
    }
    # Hand the task id and count to the batch watch page so its SSE can attach.
    return templates.TemplateResponse(
        "watch_batch.html",
        {"request": request, "task_id": task_id, "count": len(theme_lines)},
    )


@app.post("/gen-query-theme")
async def gen_query_theme(request: Request):
    """Generate (query, theme) from a user topic and return JSON.

    Accepts either a JSON body {"topic": "..."} or a form field 'topic'.
    """
    topic: Optional[str] = None
    try:
        content_type = request.headers.get("content-type", "").lower()
        if "application/json" in content_type:
            payload = await request.json()
            if isinstance(payload, dict):
                topic = str(payload.get("topic") or "").strip()
        else:
            # Fall back to form-encoded bodies.
            form = await request.form()
            topic = str(form.get("topic") or "").strip()
    except Exception:
        topic = None

    if not topic:
        return {"ok": False, "error": "missing topic", "query": "", "theme": ""}

    try:
        query, theme = _gen_query_and_theme(topic)
    except Exception as exc:
        return {"ok": False, "error": str(exc), "query": topic, "theme": topic}
    return {"ok": True, "query": query or topic, "theme": theme or topic}


def _sse_line(data: str) -> bytes:
    return (f"data: {data}\n\n").encode("utf-8", errors="replace")


def _clean_markdown_text(src: str) -> str:
    lines = (src or "").splitlines()
    start = 0
    for i, raw in enumerate(lines):
        s = raw.lstrip("\ufeff").lstrip()
        if re.match(r"^#\s+", s) and not s.startswith("##"):
            start = i
            break
        m = re.match(r"^<h1(?:\s+[^>]*)?>([\s\S]*?)</h1>\s*$", s, flags=re.IGNORECASE)
        if m:
            start = i
            break
    out = lines[start:]
    # drop trailing blank lines
    while out and out[-1].strip() == "":
        out.pop()
    # drop trailing 'tokens used:'
    if out and re.search(r"tokens\s+used:\s*", out[-1], flags=re.IGNORECASE):
        out.pop()
    return "\n".join(out) + ("\n" if out else "")


def _clean_markdown_file(path: str) -> bool:
    """Clean the markdown file at *path* in place via _clean_markdown_text.

    Returns True on success, False on any failure. Best-effort by design:
    callers only use the result to decide whether to log the cleanup.
    """
    try:
        # Context managers guarantee the handles close even if read/write fails
        # (the original bare open().read() leaked the descriptor on error).
        with open(path, "r", encoding="utf-8", errors="ignore") as fh:
            cleaned = _clean_markdown_text(fh.read())
        with open(path, "w", encoding="utf-8", errors="ignore") as fh:
            fh.write(cleaned)
        return True
    except Exception:
        return False

def _find_first_h1(markdown: str) -> Optional[str]:
    lines = (markdown or "").splitlines()
    for raw in lines:
        s = raw.lstrip("\ufeff").lstrip()
        if re.match(r"^#\s+", s) and not s.startswith("##"):
            return s[1:].strip()
        m = re.match(r"^<h1(?:\s+[^>]*)?>([\s\S]*?)</h1>\s*$", s, flags=re.IGNORECASE)
        if m:
            # strip any inner tags
            inner = re.sub(r"<[^>]+>", "", m.group(1)).strip()
            return inner
    return None


def _sanitize_display_name(name: str) -> str:
    # Remove illegal filesystem chars and collapse whitespace
    cleaned = re.sub(r"[\\/:*?\"<>|]", " ", name).strip()
    cleaned = re.sub(r"\s+", " ", cleaned)
    # length guard
    if len(cleaned) > 120:
        cleaned = cleaned[:120].rstrip()
    return cleaned or "untitled"


def _derive_display_name_from_file(path: str) -> str:
    """Derive a human-friendly display name for the file at *path*.

    Prefers the document's first H1; falls back to the filename without its
    extension. The result is filesystem-sanitized either way.
    """
    try:
        # `with` closes the handle even on decode/regex failure (the original
        # bare open().read() leaked the descriptor on error).
        with open(path, "r", encoding="utf-8", errors="ignore") as fh:
            h1 = _find_first_h1(fh.read())
        if h1:
            return _sanitize_display_name(h1)
    except Exception:
        pass
    # Fallback: filename sans extension.
    stem, _ext = os.path.splitext(os.path.basename(path))
    return _sanitize_display_name(stem)


def _send_wecom_file(path: str, webhook_override: Optional[str] = None, display_name: Optional[str] = None) -> tuple[bool, str]:
    try:
        from wecom_file_sender import WeComFileSender  # type: ignore
    except Exception as e:
        return False, f"wecom module import error: {e}"
    try:
        webhook = webhook_override or os.environ.get("WECOM_WEBHOOK", DEFAULT_WECOM_WEBHOOK)
        sender = WeComFileSender(webhook=webhook, timeout=15, disable_proxy=True)
        dn = display_name or _derive_display_name_from_file(path)
        sender.send_markdown_file(path, display_name=dn + ".md")
        return True, "ok"
    except Exception as e:
        return False, str(e)


@app.get("/stream")
def stream(query: Optional[str] = None,
           theme: Optional[str] = None,
           browser: Optional[str] = None,
           skip_crawl: Optional[str] = None,
           compose_direct: Optional[str] = None,
           max_links: Optional[int] = None,
           start: Optional[int] = None,
           end: Optional[int] = None,
           crawler_timeout: Optional[int] = 30,
           crawler_settle: Optional[float] = 0.5,
           crawler_workers: Optional[int] = None,
           crawler_headless: Optional[str] = None,
           crawler_page_load_strategy: Optional[str] = None,
           crawler_user_data_dir: Optional[str] = None,
           crawler_raw_html_out: Optional[str] = None,
           wecom_send: Optional[str] = None,
           wecom_webhook: Optional[str] = None):
    """Run the blog pipeline as a subprocess and stream its output over SSE.

    Query parameters mirror the pipeline's CLI flags; flag-like parameters
    arrive as strings (e.g. "1") and are treated as set when truthy.
    """
    # Build the command line for the pipeline subprocess.
    py = os.environ.get("PYTHON", sys.executable or "python")
    cmd: List[str] = [py, "-u", "auto_blog_pipeline.py"]
    if query:
        cmd += ["--query", query]
    if theme:
        cmd += ["--theme", theme]
    if browser:
        cmd += ["--browser", browser]
    if skip_crawl:
        cmd += ["--skip-crawl"]
    if compose_direct:
        cmd += ["--compose-direct"]
    if isinstance(max_links, int):
        cmd += ["--max-links", str(max_links)]
    if isinstance(start, int):
        cmd += ["--start", str(start)]
    if isinstance(end, int):
        cmd += ["--end", str(end)]
    if isinstance(crawler_timeout, int):
        cmd += ["--crawler-timeout", str(crawler_timeout)]
    if isinstance(crawler_settle, (float, int)):
        cmd += ["--crawler-settle", str(crawler_settle)]
    if isinstance(crawler_workers, int):
        cmd += ["--crawler-workers", str(crawler_workers)]
    if crawler_headless:
        cmd += ["--crawler-headless"]
    if crawler_page_load_strategy:
        cmd += ["--crawler-page-load-strategy", crawler_page_load_strategy]
    if crawler_user_data_dir:
        cmd += ["--crawler-user-data-dir", crawler_user_data_dir]
    if crawler_raw_html_out:
        cmd += ["--crawler-raw-html-out", crawler_raw_html_out]
    # WeCom integration: pass through to pipeline so it sends final DOCX
    if wecom_send:
        cmd += ["--wecom-send"]
    # If webhook not provided, fall back to env var or module default.
    if wecom_send and not wecom_webhook:
        wecom_webhook = os.environ.get("WECOM_WEBHOOK", DEFAULT_WECOM_WEBHOOK)
    if wecom_webhook:
        cmd += ["--wecom-webhook", wecom_webhook]

    def event_generator():
        # Echo the supplied theme/query first so the watch page shows context.
        if theme:
            yield _sse_line("THEME: " + str(theme))
        if query:
            yield _sse_line("QUERY: " + str(query))

        env = os.environ.copy()
        env.setdefault("PYTHONUNBUFFERED", "1")
        env.setdefault("PYTHONIOENCODING", "utf-8")
        proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,  # interleave stderr into the same stream
            text=True,
            bufsize=1,  # line-buffered so lines are forwarded promptly
            universal_newlines=True,
            encoding="utf-8",
            errors="replace",
            env=env,
        )

        def _kill():
            # Terminate the child if the client disconnects mid-stream.
            if proc and proc.poll() is None:
                try:
                    proc.kill()
                except Exception:
                    pass

        try:
            yield _sse_line("START pipeline")
            yield _sse_line("CMD: " + " ".join(cmd))
            if proc.stdout:
                # readline() returns "" only at EOF, so this loop naturally
                # drains all buffered output after the child exits. (The old
                # per-line poll()-and-break could drop trailing lines.)
                for line in iter(proc.stdout.readline, ""):
                    txt = line.rstrip("\n")
                    yield _sse_line(txt)
                    # Intercept the final blog path and strip generator noise.
                    if txt.startswith("[INFO] Final blog saved -> "):
                        out_path = txt.split("->", 1)[-1].strip()
                        if _clean_markdown_file(out_path):
                            yield _sse_line(f"CLEANED blog -> {out_path}")
                            # If WeCom is requested, the pipeline will send the
                            # final DOCX (or MD fallback); avoid duplicates here.
                            if wecom_send:
                                yield _sse_line("WECOM: delegated to pipeline (docx preferred)")
                    if txt.startswith("[INFO] Split output -> "):
                        try:
                            # format: Split output -> steps: <p1> | content: <p2>
                            m = re.search(r"steps:\s*(.*?)\s*\|\s*content:\s*(.*)$", txt)
                            if m:
                                p1, p2 = m.group(1), m.group(2)
                                for p in (p1, p2):
                                    p = p.strip()
                                    if p and os.path.exists(p):
                                        try:
                                            os.remove(p)
                                            yield _sse_line(f"REMOVED {p}")
                                        except Exception:
                                            pass
                        except Exception:
                            pass
            rc = proc.wait()
            yield _sse_line(f"EXIT {rc}")
        except GeneratorExit:
            # Client disconnected: kill the child, then propagate the close.
            _kill()
            raise
        except Exception as e:
            yield _sse_line(f"ERROR: {e}")
            _kill()
        finally:
            try:
                if proc.stdout:
                    proc.stdout.close()
            except Exception:
                pass

    headers = {
        "Cache-Control": "no-cache",
        "X-Accel-Buffering": "no",  # disable proxy buffering for SSE
        "Connection": "keep-alive",
    }
    return StreamingResponse(event_generator(), media_type="text/event-stream", headers=headers)


def _gen_query_and_theme(input_theme: str) -> tuple[str, str]:
    """Generate (query, theme) from an input topic using existing Gemini helper.

    Priority:
    1) use_gemini_search.generate_topic_and_keywords (supports AIHUBMIX + Gemini fallback)
    2) fallback to google-generativeai directly if available
    3) fallback to (input,input)
    """
    base = input_theme.strip()
    q = base
    t = base
    # 1) Try project helper
    try:
        from use_gemini_search import generate_topic_and_keywords  # type: ignore
        import os as _os
        api_key = _os.environ.get("AIHUBMIX_API_KEY")
        data = generate_topic_and_keywords(base, api_key)
        if isinstance(data, dict):
            # pick theme field dynamically
            t = base
            try:
                # Prefer fields containing title/theme
                for k, v in data.items():
                    if isinstance(k, str) and isinstance(v, str):
                        kl = k.lower()
                        if ("title" in kl) or ("theme" in kl):
                            if v.strip():
                                t = v.strip()
                                break
                # Fallback: any reasonable string field
                if t == base:
                    for k, v in data.items():
                        if isinstance(v, str) and 3 <= len(v.strip()) <= 120:
                            t = v.strip()
                            break
            except Exception:
                pass
            # pick first keywords list dynamically
            kws = None
            try:
                for k, v in data.items():
                    if isinstance(v, list) and v and all(isinstance(x, str) for x in v):
                        kws = v
                        break
            except Exception:
                pass
            if kws and isinstance(kws[0], str) and kws[0].strip():
                q = kws[0].strip()
            return q, t
    except Exception:
        pass

    # 2) Try direct google.generativeai
    try:
        if genai is not None:
            import os
            api_key = os.environ.get("GEMINI_API_KEY")
            if api_key:
                genai.configure(api_key=api_key)
                model = genai.GenerativeModel("gemini-1.5-flash")
                # Improved prompt for single best query + theme
                prompt = (
                    "You are a search strategist and editorial planner. Given a user topic (could be Chinese or English), "
                    "produce JSON with two fields: query and theme.\n\n"
                    "Constraints for query (single best search phrase):\n"
                    "- English-only, ASCII only; 10-14 words.\n"
                    "- Extract core domain vocabulary from the topic; include 1-2 specific terms (not generic).\n"
                    "- Add social/UGC exclusions: -site:pinterest.com -site:youtube.com -site:reddit.com -site:tiktok.com -site:instagram.com -site:medium.com -site:quora.com\n"
                    "- If the topic implies professional/authoritative evidence (e.g., theory, standards, materials, science), prefer hints like site:edu OR site:gov OR filetype:pdf (only if appropriate).\n"
                    "- No quotes, no emojis, no pipes/brackets; no trailing punctuation; avoid duplicate words; avoid generic wording.\n\n"
                    "Constraints for theme (final H1 idea):\n"
                    "- English-only, 8-14 words; specific, benefit-driven, and with a unique angle (method/constraint/contrarian).\n"
                    "- Reflects the same focus implied by the query.\n"
                    "- No brand names; avoid 'Discover' / 'Explore'; no punctuation clutter.\n\n"
                    "Return JSON only:\n{\"query\": \"...\", \"theme\": \"...\"}\n\n"
                    "Topic:\n" + base
                )
                resp = model.generate_content(prompt)
                text = getattr(resp, "text", "").strip()
                if text.startswith("```") and text.endswith("```"):
                    text = text.strip("`").strip()
                    if text.lower().startswith("json"):
                        text = text[4:].strip()
                data = json.loads(text)
                q = str(data.get("query") or q).strip()
                t = str(data.get("theme") or t).strip()
                return q, t
    except Exception:
        pass

    # 3) Fallback
    return q, t


@app.get("/stream-batch")
def stream_batch(task_id: str):
    """Run a registered batch of pipeline tasks sequentially, streaming via SSE.

    Looks up the task payload stored by /run-batch; unlike /stream, WeCom
    sending for batch runs is performed by this process after cleanup.
    """
    payload = BATCH_TASKS.get(task_id)
    if not payload:
        return StreamingResponse(iter([_sse_line("ERROR: invalid task id")] ), media_type="text/event-stream")

    def event_generator():
        themes: list[str] = payload.get("themes", [])
        use_gemini: bool = payload.get("use_gemini", False)
        opts = payload.get("opts", {})

        py = os.environ.get("PYTHON", sys.executable or "python")
        yield _sse_line(f"BATCH START {len(themes)} tasks")
        for idx, input_theme in enumerate(themes, start=1):
            try:
                if use_gemini:
                    # Generate optimized query/theme via Gemini helpers.
                    query, theme = _gen_query_and_theme(input_theme)
                    # Log generation result explicitly for visibility.
                    yield _sse_line(f"GEMINI INPUT: {input_theme}")
                    if (query or "").strip() != input_theme.strip() or (theme or "").strip() != input_theme.strip():
                        yield _sse_line(f"GEMINI OUTPUT -> QUERY: {query} | THEME: {theme}")
                    else:
                        yield _sse_line("GEMINI FALLBACK: using original topic as query/theme (check API keys/network)")
                else:
                    query = input_theme
                    theme = input_theme
                yield _sse_line(f"TASK {idx}/{len(themes)} THEME: {theme}")
                yield _sse_line(f"TASK {idx}/{len(themes)} QUERY: {query}")
                # Translate stored options into pipeline CLI flags.
                cmd: list[str] = [py, "-u", "auto_blog_pipeline.py", "--theme", theme, "--query", query]
                if opts.get("browser"):
                    cmd += ["--browser", opts["browser"]]
                if opts.get("skip_crawl"):
                    cmd += ["--skip-crawl"]
                if opts.get("compose_direct"):
                    cmd += ["--compose-direct"]
                if opts.get("max_links") is not None:
                    cmd += ["--max-links", str(opts["max_links"])]
                if opts.get("start") is not None:
                    cmd += ["--start", str(opts["start"])]
                if opts.get("end") is not None:
                    cmd += ["--end", str(opts["end"])]
                if opts.get("crawler_timeout") is not None:
                    cmd += ["--crawler-timeout", str(opts["crawler_timeout"])]
                if opts.get("crawler_settle") is not None:
                    cmd += ["--crawler-settle", str(opts["crawler_settle"])]
                if opts.get("crawler_workers") is not None:
                    cmd += ["--crawler-workers", str(opts["crawler_workers"])]
                if opts.get("crawler_headless"):
                    cmd += ["--crawler-headless"]
                if opts.get("crawler_page_load_strategy"):
                    cmd += ["--crawler-page-load-strategy", opts["crawler_page_load_strategy"]]
                if opts.get("crawler_user_data_dir"):
                    cmd += ["--crawler-user-data-dir", opts["crawler_user_data_dir"]]
                if opts.get("crawler_raw_html_out"):
                    cmd += ["--crawler-raw-html-out", opts["crawler_raw_html_out"]]

                env = os.environ.copy()
                env.setdefault("PYTHONUNBUFFERED", "1")
                env.setdefault("PYTHONIOENCODING", "utf-8")
                proc = subprocess.Popen(
                    cmd,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,  # interleave stderr into the stream
                    text=True,
                    bufsize=1,  # line-buffered so lines are forwarded promptly
                    universal_newlines=True,
                    encoding="utf-8",
                    errors="replace",
                    env=env,
                )
                yield _sse_line("CMD: " + " ".join(cmd))
                if proc.stdout:
                    # readline() returns "" only at EOF, so this loop naturally
                    # drains all buffered output after the child exits. (The old
                    # per-line poll()-and-break could drop trailing lines.)
                    for line in iter(proc.stdout.readline, ""):
                        txt = line.rstrip("\n")
                        yield _sse_line(f"[{idx}/{len(themes)}] " + txt)
                        # Intercept and clean the final blog file.
                        if txt.startswith("[INFO] Final blog saved -> "):
                            out_path = txt.split("->", 1)[-1].strip()
                            if _clean_markdown_file(out_path):
                                yield _sse_line(f"[{idx}/{len(themes)}] CLEANED blog -> {out_path}")
                                # Batch runs send to WeCom from this process.
                                if opts.get("wecom_send"):
                                    ok, msg = _send_wecom_file(out_path, webhook_override=opts.get("wecom_webhook"))
                                    if ok:
                                        yield _sse_line(f"[{idx}/{len(themes)}] WECOM SENT -> {out_path}")
                                    else:
                                        yield _sse_line(f"[{idx}/{len(themes)}] WECOM ERROR: {msg}")
                        # Remove split files if produced.
                        if txt.startswith("[INFO] Split output -> "):
                            try:
                                # format: Split output -> steps: <p1> | content: <p2>
                                m = re.search(r"steps:\s*(.*?)\s*\|\s*content:\s*(.*)$", txt)
                                if m:
                                    p1, p2 = m.group(1), m.group(2)
                                    for p in (p1, p2):
                                        p = p.strip()
                                        if p and os.path.exists(p):
                                            try:
                                                os.remove(p)
                                                yield _sse_line(f"[{idx}/{len(themes)}] REMOVED {p}")
                                            except Exception:
                                                pass
                            except Exception:
                                pass
                rc = proc.wait()
                yield _sse_line(f"TASK EXIT {idx} {rc}")
            except Exception as e:
                # Keep the batch going even when one task fails.
                yield _sse_line(f"TASK ERROR {idx}: {e}")
        yield _sse_line("BATCH EXIT 0")

    headers = {
        "Cache-Control": "no-cache",
        "X-Accel-Buffering": "no",  # disable proxy buffering for SSE
        "Connection": "keep-alive",
    }
    return StreamingResponse(event_generator(), media_type="text/event-stream", headers=headers)


@app.get("/health")
def health():
    """Liveness probe endpoint; always reports the process as healthy."""
    return dict(ok=True)


if __name__ == "__main__":
    # Local development entry point; the PORT env var overrides the default.
    import uvicorn

    listen_port = int(os.environ.get("PORT", "8111"))
    uvicorn.run("web_server:app", host="0.0.0.0", port=listen_port, reload=True)




