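"""Streamlit demo: extractive question answering over a corpus of company
software-product descriptions, built with Haystack (TfidfRetriever +
FARMReader over an InMemoryDocumentStore)."""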
import json
import logging
import os

import dill
import streamlit as st
from annotated_text import annotation
from haystack.document_stores import InMemoryDocumentStore
from haystack.nodes import FARMReader, TfidfRetriever
from haystack.pipelines import ExtractiveQAPipeline
from markdown import markdown
from streamlit_lottie import st_lottie
st.set_page_config(page_title="QA-project", page_icon="📇")
os.environ['TOKENIZERS_PARALLELISM'] = "false"
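# Data artifacts: a dill-pickled document corpus (presumably ~36,838 documents,
# judging by the file name), a pickled reg_id -> module-name lookup table, and
# a Lottie animation for the page header.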
DATA_DIR = './dataset'
NAMES_DICT_PATH = 'mod_names_dict.pkl'
DOCS_PATH = os.path.join(DATA_DIR, 'all_docs_36838.pkl')
LOTTIE_PATH = './img/108423-search-for-documents.json'
PROG_TITLE = "Research cases"
PROG_SUBTITLE = "Recommendations on the company's existing digital product components for solving new business tasks"
# Adjust to a question that you would like users to see in the search bar when they load the UI.
# The defaults stay in Russian because the corpus and the reader model are Russian-language;
# they translate to "What does Домашняя бухгалтерия do?" /
# "Домашняя бухгалтерия lets you keep accounts in different currencies".
DEFAULT_QUESTION_AT_STARTUP = os.getenv("DEFAULT_QUESTION_AT_STARTUP", "Что делает Домашняя бухгалтерия?")
DEFAULT_ANSWER_AT_STARTUP = os.getenv("DEFAULT_ANSWER_AT_STARTUP", "Домашняя бухгалтерия позволяет вести счета в разных валютах")
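# Cached loaders: st.experimental_memo memoizes the deserialized objects so they
# are not re-read from disk on every Streamlit rerun (newer Streamlit releases
# rename this decorator to st.cache_data).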
@st.experimental_memo
def load_dict(path):
    """Deserialize a dill-pickled object (here: the module-name lookup table)."""
    with open(path, "rb") as f:
        loaded = dill.load(f)
    return loaded
@st.experimental_memo
def get_lottie(path):
    """Load the Lottie animation JSON shown next to the page title."""
    with open(path, "r", encoding="utf-8", errors="ignore") as f:
        lottie_data = json.load(f)
    return lottie_data
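# The corpus pickle is assumed to hold objects that write_documents() accepts
# directly, i.e. haystack Document instances (or dicts) carrying the fields
# used below: content, meta["reg_id"] and meta["name"] / meta["url"].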
def load_and_write_data(document_store):
    """Load the pre-built document corpus and index it into the store."""
    with open(DOCS_PATH, "rb") as f:
        docs = dill.load(f)
    document_store.write_documents(docs)
def get_doc_reg_id(result):
    """Return the result's registry id, or None if it is absent."""
    return result.get("reg_id") or None
# Haystack Components
document_store = InMemoryDocumentStore() # use_bm25=True
load_and_write_data(document_store)
retriever = TfidfRetriever(document_store=document_store)
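# The `use_bm25=True` hint above points at an alternative sparse-retrieval setup;
# on a recent Haystack 1.x release it would look roughly like this (a sketch, not
# what this app ships):
#     document_store = InMemoryDocumentStore(use_bm25=True)
#     retriever = BM25Retriever(document_store=document_store)  # from haystack.nodes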
# Reader: a Russian BERT checkpoint, run on CPU in a single process.
reader = FARMReader(model_name_or_path="DeepPavlov/rubert-base-cased-sentence",
                    use_gpu=False,
                    num_processes=1)
pipeline = ExtractiveQAPipeline(reader, retriever)
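# Query flow: TfidfRetriever narrows the corpus to the top-10 candidate documents,
# then FARMReader extracts the top-5 answer spans from them (see the top_k params
# in ask_question below).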
def set_state_if_absent(key, value):
    """Initialize a st.session_state key only if it has not been set yet."""
    if key not in st.session_state:
        st.session_state[key] = value
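# Seed session state so the search bar and the results area are populated on first load.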
set_state_if_absent("question", DEFAULT_QUESTION_AT_STARTUP)
set_state_if_absent("answer", DEFAULT_ANSWER_AT_STARTUP)
set_state_if_absent("results", None)
set_state_if_absent("predictions", None)
def reset_results(*args):
    """Clear stale results whenever the question text changes."""
    st.session_state.results = None
# Streamlit App
lottie_data = get_lottie(LOTTIE_PATH)
img, title = st.columns([2, 3])
with img:
    st_lottie(lottie_data)  # height=350
with title:
    st.title(PROG_TITLE)
    st.subheader(PROG_SUBTITLE)
st.markdown("""
Это демонстрационная версия сервиса поисковой системы программных продуктов с использованием технологии
[Haystack Extractive QA Pipeline](https://haystack.deepset.ai/components/ready-made-pipelines#extractiveqapipeline)
и [InMemoryDocumentStore](https://haystack.deepset.ai/components/document-store)
Чтобы испытать сервис можно задавать вопросы в свободной форме по функционалу программных продуктов.
""", unsafe_allow_html=True)
question = st.text_input("", value=st.session_state.question, max_chars=100, on_change=reset_results)
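# reg_id -> human-readable module/program name, used to label each result below.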
mod_names_dict = load_dict(NAMES_DICT_PATH)
def ask_question(question):
    """Run the QA pipeline and normalize its predictions for display."""
    prediction = pipeline.run(query=question, params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 5}})
    answers = prediction["answers"]
    results = []
    for answer in answers:
        answer = answer.to_dict()
        if answer.get("answer", None):
            # Look up the source document the answer span was extracted from.
            document = [doc for doc in prediction["documents"] if doc.to_dict()["id"] == answer["document_id"]][0]
            results.append(
                {
                    # Keep the raw context so it can be located verbatim inside
                    # the full document text when highlighting the answer.
                    "context": answer["context"],
                    "answer": answer.get("answer", None),
                    "source": answer["meta"]["name"] if answer["meta"].get("name", None) else answer["meta"]["url"],
                    "relevance": round(answer["score"] * 100, 2),
                    "document": document.content,
                    "doc_score": document.score,
                    "reg_id": document.meta["reg_id"],
                    "offset_start_in_doc": answer["offsets_in_document"][0]["start"],
                    "_raw": answer,
                }
            )
        else:
            results.append(
                {
                    "context": None,
                    "answer": None,
                    "document": None,
                    "relevance": round(answer["score"] * 100, 2),
                    "_raw": answer,
                }
            )
    return results, prediction
if question:
    with st.spinner("🕰️    Running semantic search over the knowledge base ..."):
        try:
            logging.info("Asked %s", question)
            st.session_state.results, st.session_state.predictions = ask_question(question)
        except Exception as e:
            logging.exception(e)
if st.session_state.results:
    st.write("## Results")
    for result in st.session_state.results:
        if result["answer"]:
            context = result["document"]
            # Locate the answer context inside the full document so it can be
            # highlighted in place; works because "context" is stored verbatim.
            start_idx = context.find(result["context"])
            end_idx = start_idx + len(result["context"])
            reg_id = get_doc_reg_id(result)
            module_info = ""
            if reg_id:
                module_name = mod_names_dict.get(reg_id, None)
                if module_name:
                    module_info = f"**Module/program name: :orange[{module_name}]**"
                else:
                    module_info = "Module/program name is missing!"
            st.markdown(f"{module_info} - **Relevance:** {result['relevance']}")
            st.write(
                markdown(
                    context[:start_idx]
                    + str(annotation(body=result["context"], label="ANSWER", background="#ff700f", color="#ffffff"))
                    + context[end_idx:]
                ),
                unsafe_allow_html=True,
            )
            st.markdown(f"**Source:** {result['source']}")
        else:
            st.info(
                "🤔    The search engine could not handle your query. Try rephrasing it!"
            )