import gc

import streamlit as st
import torch
from omegaconf import OmegaConf

from src.rag_qa import RagQA

CONFIG_PATH = "src/rag_pipeline/conf/inference.yaml"
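

# Cache the heavy RagQA object across reruns and sessions so models and indices
# load only once per server process.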
@st.cache_resource(
    show_spinner="Loading models and indices. This might take a while. Go get hydrated..."
)
def get_rag_qa():
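    # Best-effort cleanup: if a previous instance was evicted from the cache,
    # reclaim its CPU and GPU memory before loading a fresh one.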
    gc.collect()
    torch.cuda.empty_cache()
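    # The OmegaConf YAML drives the whole pipeline; load() is assumed to pull
    # model weights and retrieval indices into memory.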
    conf = OmegaConf.load(CONFIG_PATH)
    rag_qa = RagQA(conf)
    rag_qa.load()
    return rag_qa
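

# Center the header image by placing it in the middle of three equal columns.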
left_column, cent_column, last_column = st.columns(3)
with cent_column:
    st.image("pittsburgh.webp", width=400)

st.title("Know anything about Pittsburgh")

# Initialize the RagQA model up front; on reruns this hits the cache.
_ = get_rag_qa()

# Run QA
st.subheader("Ask away:")
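# The subheader above acts as the visible label, so hide the input's own label.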
question = st.text_input("Ask away:", "", label_visibility="collapsed")
submit = st.button("Submit")

st.markdown(
    """
> **For example, ask things like:**
>
> Who is the largest employer in Pittsburgh?
> Where is the Smithsonian-affiliated regional history museum in Pittsburgh?
> Who is the president of CMU?

---
""",
    unsafe_allow_html=False,
)
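
# Streamlit reruns the whole script on every interaction; gate the expensive
# retrieval + generation step behind an explicit button click.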
if submit:
    if not question.strip():
        st.error("Machine Learning still can't read minds. Please enter a question.")
    else:
        try:
            with st.spinner("Combing through 20,000+ documents from 14,000+ URLs..."):
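                # answer() is assumed to return the generated answer string and
                # the retrieved source chunks that grounded it.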
                answer, sources = get_rag_qa().answer(question)

            st.subheader("Answer:")
            st.write(answer)
            st.write("")
            with st.expander("Show Sources"):
                st.subheader("Sources:")
                for i, source in enumerate(sources):
                    st.markdown(f"**Name:** {source.name}")
                    st.markdown(f"**Index ID:** {source.index_id}")
st.markdown(f"**Text:**")
                    st.write(source.text)
                    if i < len(sources) - 1:
                        st.markdown("---")
        except Exception as e:
            st.error(f"An error occurred: {e}")