ts startup
app/__pycache__/models.cpython-311.pyc CHANGED
Binary files a/app/__pycache__/models.cpython-311.pyc and b/app/__pycache__/models.cpython-311.pyc differ

app/__pycache__/nodes.cpython-311.pyc CHANGED
Binary files a/app/__pycache__/nodes.cpython-311.pyc and b/app/__pycache__/nodes.cpython-311.pyc differ

app/__pycache__/utils.cpython-311.pyc CHANGED
Binary files a/app/__pycache__/utils.cpython-311.pyc and b/app/__pycache__/utils.cpython-311.pyc differ
app/main.py CHANGED

@@ -387,7 +387,7 @@ def process_query_core(
         else:
             return f"Error: {str(e)}"
 
-
+
 async def process_query_gradio_streaming(query: str, file_upload, reports_filter: str = "", sources_filter: str = "",
                                          subtype_filter: str = "", year_filter: str = ""):
     """Streaming version of process_query_gradio that yields partial results"""

@@ -469,7 +469,7 @@ async def process_query_gradio_streaming(query: str, file_upload, reports_filter
         logger.error(f"Streaming pipeline failed: {str(e)}")
         yield {"type": "error", "content": f"Error: {str(e)}"}
 
-
+
 async def chatui_adapter(data):
     """Updated to return content without SSE formatting for LangServe"""
     try:

@@ -547,36 +547,36 @@ def process_query_langserve(input_data: ChatFedInput) -> ChatFedOutput:
     return ChatFedOutput(result=result["result"], metadata=result["metadata"])
 
 # GRADIO TEST UI
-def create_gradio_interface():
-    with gr.Blocks(title="ChatFed Orchestrator") as demo:
-        gr.Markdown("# ChatFed Orchestrator")
-        gr.Markdown("Upload documents (PDF/DOCX/GeoJSON) alongside your queries for enhanced context. MCP endpoints available at `/gradio_api/mcp/sse`")
-
-        with gr.Row():
-            with gr.Column():
-                query_input = gr.Textbox(label="Query", lines=2, placeholder="Enter your question...")
-                file_input = gr.File(label="Upload Document (PDF/DOCX/GeoJSON)", file_types=[".pdf", ".docx", ".geojson", ".json"])
+# def create_gradio_interface():
+#     with gr.Blocks(title="ChatFed Orchestrator") as demo:
+#         gr.Markdown("# ChatFed Orchestrator")
+#         gr.Markdown("Upload documents (PDF/DOCX/GeoJSON) alongside your queries for enhanced context. MCP endpoints available at `/gradio_api/mcp/sse`")
+
+#         with gr.Row():
+#             with gr.Column():
+#                 query_input = gr.Textbox(label="Query", lines=2, placeholder="Enter your question...")
+#                 file_input = gr.File(label="Upload Document (PDF/DOCX/GeoJSON)", file_types=[".pdf", ".docx", ".geojson", ".json"])
 
-                with gr.Accordion("Filters (Optional)", open=False):
-                    reports_filter_input = gr.Textbox(label="Reports Filter", placeholder="e.g., annual_reports")
-                    sources_filter_input = gr.Textbox(label="Sources Filter", placeholder="e.g., internal")
-                    subtype_filter_input = gr.Textbox(label="Subtype Filter", placeholder="e.g., financial")
-                    year_filter_input = gr.Textbox(label="Year Filter", placeholder="e.g., 2024")
+#                 with gr.Accordion("Filters (Optional)", open=False):
+#                     reports_filter_input = gr.Textbox(label="Reports Filter", placeholder="e.g., annual_reports")
+#                     sources_filter_input = gr.Textbox(label="Sources Filter", placeholder="e.g., internal")
+#                     subtype_filter_input = gr.Textbox(label="Subtype Filter", placeholder="e.g., financial")
+#                     year_filter_input = gr.Textbox(label="Year Filter", placeholder="e.g., 2024")
 
-                submit_btn = gr.Button("Submit", variant="primary")
+#                 submit_btn = gr.Button("Submit", variant="primary")
 
-            with gr.Column():
-                output = gr.Textbox(label="Response", lines=15, show_copy_button=True)
-
-        # Use streaming function
-        submit_btn.click(
-            fn=process_query_gradio_streaming,
-            inputs=[query_input, file_input, reports_filter_input, sources_filter_input,
-                    subtype_filter_input, year_filter_input],
-            outputs=output
-        )
+#             with gr.Column():
+#                 output = gr.Textbox(label="Response", lines=15, show_copy_button=True)
+
+#         # Use streaming function
+#         submit_btn.click(
+#             fn=process_query_gradio_streaming,
+#             inputs=[query_input, file_input, reports_filter_input, sources_filter_input,
+#                     subtype_filter_input, year_filter_input],
+#             outputs=output
+#         )
 
-    return demo
+#     return demo
 
 @asynccontextmanager
 async def lifespan(app: FastAPI):

@@ -798,16 +798,16 @@ add_routes(
 )
 
 if __name__ == "__main__":
-    # Create Gradio interface
-    demo = create_gradio_interface()
+    # # Create Gradio interface
+    # demo = create_gradio_interface()
 
     # Mount Gradio app to FastAPI
-    app = gr.mount_gradio_app(app, demo, path="/gradio")
+    # app = gr.mount_gradio_app(app, demo, path="/gradio")
 
-    host = os.getenv("HOST", "
+    host = os.getenv("HOST", "0.0.0.0")
     port = int(os.getenv("PORT", "7860"))
 
     logger.info(f"Starting FastAPI server on {host}:{port}")
-    logger.info(f"Gradio UI available at: http://{host}:{port}/gradio")
+    # logger.info(f"Gradio UI available at: http://{host}:{port}/gradio")
 
     uvicorn.run(app, host=host, port=port, log_level="info", access_log=True)
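Net effect of the commit: the Space now starts as a plain FastAPI/LangServe service; the Gradio test UI and its mount are commented out and only uvicorn serves the app. Below is a minimal, self-contained sketch of that startup path, assuming the env-driven host/port and the uvicorn call shown in the diff. The bare FastAPI() object here is an illustrative stand-in for the real application in app/main.py, which also registers LangServe routes via add_routes and a lifespan handler.

import os
import logging

import uvicorn
from fastapi import FastAPI

logger = logging.getLogger(__name__)

# Stand-in for the application object built in app/main.py; the real app
# also wires up LangServe routes (add_routes) and the lifespan handler.
app = FastAPI(title="ChatFed Orchestrator")

if __name__ == "__main__":
    # The Gradio mount (gr.mount_gradio_app(app, demo, path="/gradio")) is now
    # commented out, so no /gradio UI is exposed at startup.
    host = os.getenv("HOST", "0.0.0.0")    # bind address, default per the diff
    port = int(os.getenv("PORT", "7860"))  # 7860 is the conventional Spaces port

    logger.info(f"Starting FastAPI server on {host}:{port}")
    uvicorn.run(app, host=host, port=port, log_level="info", access_log=True)

Restoring the test UI would amount to uncommenting create_gradio_interface() and the gr.mount_gradio_app(app, demo, path="/gradio") call before uvicorn.run, after which the UI would again be served under /gradio.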