Jai12345 committed
Commit afc7ab9
1 Parent(s): b66b798

Update app.py

Files changed (1)
  1. app.py +174 -110
app.py CHANGED
@@ -1,116 +1,180 @@
- import streamlit as st
- import os
- from haystack.utils import fetch_archive_from_http, clean_wiki_text, convert_files_to_docs
- from haystack.schema import Answer
- from haystack.document_stores import InMemoryDocumentStore
- from haystack.pipelines import ExtractiveQAPipeline
- from haystack.nodes import FARMReader, TfidfRetriever
- import logging
- from markdown import markdown
- from annotated_text import annotation
- from PIL import Image
-
- os.environ['TOKENIZERS_PARALLELISM'] = "false"
-
-
- # Haystack Components
- @st.cache(hash_funcs={"builtins.SwigPyObject": lambda _: None}, allow_output_mutation=True)
- def start_haystack():
-     document_store = InMemoryDocumentStore()
-     load_and_write_data(document_store)
-     retriever = TfidfRetriever(document_store=document_store)
-     reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2-distilled", use_gpu=True)
-     pipeline = ExtractiveQAPipeline(reader, retriever)
-     return pipeline


- def load_and_write_data(document_store):
-     doc_dir = './amazon_help_docs'
      docs = convert_files_to_docs(dir_path=doc_dir, clean_func=clean_wiki_text, split_paragraphs=True)

      document_store.write_documents(docs)

-
- pipeline = start_haystack()
-
-
- def set_state_if_absent(key, value):
-     if key not in st.session_state:
-         st.session_state[key] = value
-
-
- set_state_if_absent("question", "What is amazon music?")
- set_state_if_absent("results", None)
-
-
- def reset_results(*args):
-     st.session_state.results = None
-
-
- # Streamlit App
-
-
-
- st.markdown("""
- This QA demo uses a [Haystack Extractive QA Pipeline](https://haystack.deepset.ai/components/ready-made-pipelines#extractiveqapipeline) with
- an [InMemoryDocumentStore](https://haystack.deepset.ai/components/document-store) which contains documents about Game of Thrones 👑
- Go ahead and ask questions about the marvellous kingdom!
- """, unsafe_allow_html=True)
-
- question = st.text_input("", value=st.session_state.question, max_chars=100, on_change=reset_results)
-
-
- def ask_question(question):
-     prediction = pipeline.run(query=question, params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 5}})
-     results = []
-     for answer in prediction["answers"]:
-         answer = answer.to_dict()
-         if answer["answer"]:
-             results.append(
-                 {
-                     "context": "..." + answer["context"] + "...",
-                     "answer": answer["answer"],
-                     "relevance": round(answer["score"] * 100, 2),
-                     "offset_start_in_doc": answer["offsets_in_document"][0]["start"],
-                 }
-             )
-         else:
-             results.append(
-                 {
-                     "context": None,
-                     "answer": None,
-                     "relevance": round(answer["score"] * 100, 2),
-                 }
-             )
-     return results
-
-
- if question:
-     with st.spinner("👑    Performing semantic search on royal scripts..."):
-         try:
-             msg = 'Asked ' + question
-             logging.info(msg)
-             st.session_state.results = ask_question(question)
-         except Exception as e:
-             logging.exception(e)
-
- if st.session_state.results:
-     st.write('## Top Results')
-     for count, result in enumerate(st.session_state.results):
-         if result["answer"]:
-             answer, context = result["answer"], result["context"]
-             start_idx = context.find(answer)
-             end_idx = start_idx + len(answer)
-             st.write(
-                 markdown(context[:start_idx] + str(
-                     annotation(body=answer, label="ANSWER", background="#964448", color='#ffffff')) + context[
-                     end_idx:]),
-                 unsafe_allow_html=True,
-             )
-             st.markdown(f"**Relevance:** {result['relevance']}")
-         else:
-             st.info(
-                 "🤔    Haystack is unsure whether any of the documents contain an answer to your question. Try to reformulate it!"
-             )
-
-
+ # ## Task: Question Answering for Game of Thrones
+ #
+ # Question Answering can be used in a variety of use cases. A very common one: Using it to navigate through complex
+ # knowledge bases or long documents ("search setting").
+ #
+ # A "knowledge base" could for example be your website, an internal wiki or a collection of financial reports.
+ # In this tutorial we will work on a slightly different domain: "Game of Thrones".
+ #
+ # Let's see how we can use a bunch of Wikipedia articles to answer a variety of questions about the
+ # marvellous seven kingdoms.
+
+ import logging
+
+ # We configure how logging messages should be displayed and which log level should be used before importing Haystack.
+ # Example log message:
+ # INFO - haystack.utils.preprocessing - Converting data/tutorial1/218_Olenna_Tyrell.txt
+ # Default log level in basicConfig is WARNING so the explicit parameter is not necessary but can be changed easily:
+ logging.basicConfig(format="%(levelname)s - %(name)s - %(message)s", level=logging.WARNING)
+ logging.getLogger("haystack").setLevel(logging.INFO)
+
+ from haystack.document_stores import ElasticsearchDocumentStore
+ from haystack.utils import clean_wiki_text, convert_files_to_docs, fetch_archive_from_http, print_answers, launch_es
+ from haystack.nodes import FARMReader, TransformersReader, BM25Retriever
+
+
+ def tutorial1_basic_qa_pipeline():
+     # ## Document Store
+     #
+     # Haystack finds answers to queries within the documents stored in a `DocumentStore`. The current implementations of
+     # `DocumentStore` include `ElasticsearchDocumentStore`, `FAISSDocumentStore`, `SQLDocumentStore`, and `InMemoryDocumentStore`.
+     #
+     # **Here:** We recommend Elasticsearch as it comes preloaded with features like full-text queries, BM25 retrieval,
+     # and vector storage for text embeddings.
+     # **Alternatives:** If you are unable to set up an Elasticsearch instance, follow Tutorial 3
+     # for using SQL/InMemory document stores.
+     # **Hint**:
+     # This tutorial creates a new document store instance with Wikipedia articles on Game of Thrones. However, you can
+     # configure Haystack to work with your existing document stores.
+     #
+     # Start an Elasticsearch server
+     # You can start Elasticsearch on your local machine using Docker. If Docker is not readily available in
+     # your environment (e.g. in Colab notebooks), you can manually download and run Elasticsearch instead.
+
+     launch_es()
+
+     # Connect to Elasticsearch
+     document_store = ElasticsearchDocumentStore(host="localhost", username="", password="", index="document")
+
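+     # Alternative (rough sketch): if you cannot run Elasticsearch, an in-memory store can be used for
+     # quick prototyping together with the TfidfRetriever mentioned further below.
+     # from haystack.document_stores import InMemoryDocumentStore
+     # document_store = InMemoryDocumentStore()
+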
+     # ## Preprocessing of documents
+     #
+     # Haystack provides a customizable pipeline for:
+     # - converting files into texts
+     # - cleaning texts
+     # - splitting texts
+     # - writing them to a Document Store
+
+     # In this tutorial, we download Wikipedia articles about Game of Thrones, apply a basic cleaning function, and add
+     # them to Elasticsearch.
+
+     # Let's first fetch some documents that we want to query
+     # Here: 517 Wikipedia articles for Game of Thrones
+     doc_dir = "data/tutorial1"
+     s3_url = "https://aws-ml-blog.s3.amazonaws.com/artifacts/kendra-docs/amazon_help_docs.zip"
+     fetch_archive_from_http(url=s3_url, output_dir=doc_dir)
+
+     # Convert files to Documents that can be indexed to our datastore
      docs = convert_files_to_docs(dir_path=doc_dir, clean_func=clean_wiki_text, split_paragraphs=True)
+     # You can optionally supply a cleaning function that is applied to each doc (e.g. to remove footers).
+     # It must take a str as input and return a str.
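+     # For example, a custom cleaning function could look like this (illustrative sketch; the helper name
+     # and the footer string are made up for this example):
+     # def remove_footer(text: str) -> str:
+     #     return text.replace("All rights reserved.", "")
+     # docs = convert_files_to_docs(dir_path=doc_dir, clean_func=remove_footer, split_paragraphs=True)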
+
+     # Now, let's write the docs to our DB.
      document_store.write_documents(docs)

+     # ## Initialize Retriever & Reader
+     #
+     # ### Retriever
+     #
+     # Retrievers help narrow down the scope for the Reader to smaller units of text where a given question
+     # could be answered.
+     #
+     # They use simple but fast algorithms.
+     # **Here:** We use Elasticsearch's default BM25 algorithm
+     # **Alternatives:**
+     # - Customize the `BM25Retriever` with custom queries (e.g. boosting) and filters
+     # - Use `EmbeddingRetriever` to find candidate documents based on the similarity of
+     # embeddings (e.g. created via Sentence-BERT); a small sketch follows below
+     # - Use `TfidfRetriever` in combination with a SQL or InMemory Document store for simple prototyping and debugging
+
+     retriever = BM25Retriever(document_store=document_store)
+
+     # Alternative: An in-memory TfidfRetriever based on Pandas dataframes for building quick prototypes
+     # with a SQLite document store.
+     #
+     # from haystack.nodes import TfidfRetriever
+     # retriever = TfidfRetriever(document_store=document_store)
+
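+     # Rough sketch of the `EmbeddingRetriever` alternative (the embedding model below is just an example;
+     # dense retrieval also requires computing embeddings for the indexed documents first):
+     # from haystack.nodes import EmbeddingRetriever
+     # retriever = EmbeddingRetriever(
+     #     document_store=document_store, embedding_model="sentence-transformers/multi-qa-mpnet-base-dot-v1"
+     # )
+     # document_store.update_embeddings(retriever)
+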
+     # ### Reader
+     #
+     # A Reader scans the texts returned by retrievers in detail and extracts the k best answers. Readers are based
+     # on powerful, but slower, deep learning models.
+     #
+     # Haystack currently supports Readers based on the frameworks FARM and Transformers.
+     # With both you can either load a local model or one from Hugging Face's model hub (https://huggingface.co/models).
+     # **Here:** a medium-sized RoBERTa QA model using a Reader based on
+     # FARM (https://huggingface.co/deepset/roberta-base-squad2)
+     # **Alternatives (Reader):** TransformersReader (leveraging the `pipeline` of the Transformers package)
+     # **Alternatives (Models):** e.g. "distilbert-base-uncased-distilled-squad" (fast) or
+     # "deepset/bert-large-uncased-whole-word-masking-squad2" (good accuracy)
+     # **Hint:** You can adjust the model to return "no answer possible" with the no_ans_boost parameter. Higher values
+     # mean the model prefers "no answer possible" (see the sketch below).
+     #
+     # #### FARMReader
+
+     # Load a local model or any of the QA models on
+     # Hugging Face's model hub (https://huggingface.co/models)
+     reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=True)
+
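+     # For example (sketch; the boost value is arbitrary), to make "no answer possible" more likely:
+     # reader = FARMReader(
+     #     model_name_or_path="deepset/roberta-base-squad2", use_gpu=True, return_no_answer=True, no_ans_boost=0.5
+     # )
+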
+     # #### TransformersReader
+
+     # Alternative:
+     # reader = TransformersReader(
+     #     model_name_or_path="distilbert-base-uncased-distilled-squad", tokenizer="distilbert-base-uncased", use_gpu=-1)
+
+     # ### Pipeline
+     #
+     # With a Haystack `Pipeline` you can stick your building blocks together into a search pipeline.
+     # Under the hood, `Pipelines` are Directed Acyclic Graphs (DAGs) that you can easily customize for your own use cases.
+     # To speed things up, Haystack also comes with a few predefined Pipelines. One of them is the `ExtractiveQAPipeline` that combines a retriever and a reader to answer our questions.
+     # You can learn more about `Pipelines` in the [docs](https://haystack.deepset.ai/docs/latest/pipelinesmd).
+     from haystack.pipelines import ExtractiveQAPipeline
+
+     pipe = ExtractiveQAPipeline(reader, retriever)
+
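+     # Roughly the same pipeline, built explicitly with the generic Pipeline API (sketch):
+     # from haystack import Pipeline
+     # p = Pipeline()
+     # p.add_node(component=retriever, name="Retriever", inputs=["Query"])
+     # p.add_node(component=reader, name="Reader", inputs=["Retriever"])
+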
+     ## Voilà! Ask a question!
+     prediction = pipe.run(
+         query="Who is the father of Arya Stark?", params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 5}}
+     )
+
+     # prediction = pipe.run(query="Who created the Dothraki vocabulary?", params={"Reader": {"top_k": 5}})
+     # prediction = pipe.run(query="Who is the sister of Sansa?", params={"Reader": {"top_k": 5}})
+
+     # Now you can either print the object directly
+     print("\n\nRaw object:\n")
+     from pprint import pprint
+
+     pprint(prediction)
+
+     # Sample output:
+     # {
+     #     'answers': [ <Answer: answer='Eddard', type='extractive', score=0.9919578731060028, offsets_in_document=[{'start': 608, 'end': 615}], offsets_in_context=[{'start': 72, 'end': 79}], document_id='cc75f739897ecbf8c14657b13dda890e', meta={'name': '454_Music_of_Game_of_Thrones.txt'}, context='...' >,
+     #                  <Answer: answer='Ned', type='extractive', score=0.9767240881919861, offsets_in_document=[{'start': 3687, 'end': 3801}], offsets_in_context=[{'start': 18, 'end': 132}], document_id='9acf17ec9083c4022f69eb4a37187080', meta={'name': '454_Music_of_Game_of_Thrones.txt'}, context='...' >,
+     #                  ...
+     #                ],
+     #     'documents': [ <Document: content_type='text', score=0.8034909798951382, meta={'name': '332_Sansa_Stark.txt'}, embedding=None, id='d1f36ec7170e4c46cde65787fe125dfe', content='\n===\'\'A Game of Thrones\'\'===\nSansa Stark begins the novel by being betrothed to Crown ...'>,
+     #                    <Document: content_type='text', score=0.8002150354529785, meta={'name': '191_Gendry.txt'}, embedding=None, id='dd4e070a22896afa81748d6510006d2', content='\n===Season 2===\nGendry travels North with Yoren and other Night's Watch recruits, including Arya ...'>,
+     #                    ...
+     #                  ],
+     #     'no_ans_gap': 11.688868522644043,
+     #     'node_id': 'Reader',
+     #     'params': {'Reader': {'top_k': 5}, 'Retriever': {'top_k': 5}},
+     #     'query': 'Who is the father of Arya Stark?',
+     #     'root_node': 'Query'
+     # }
+
+     # Note that the documents contained in the above object are the documents filtered by the Retriever from
+     # the document store. Although the answers were extracted from these documents, it's possible that many
+     # answers were taken from a single one of them, and that some of the documents were not the source of any answer.
+
+     # Or use a util to simplify the output
+     # Change `minimum` to `medium` or `all` to raise the level of detail
+     print("\n\nSimplified output:\n")
+     print_answers(prediction, details="minimum")
+
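+     # The entries in prediction["answers"] are haystack Answer objects, so you can also work with the
+     # results directly, e.g. (small sketch):
+     # for ans in prediction["answers"]:
+     #     print(ans.answer, ans.score)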
+
+ if __name__ == "__main__":
+     tutorial1_basic_qa_pipeline()
+
+ # This Haystack script was made with love by deepset in Berlin, Germany
+ # Haystack: https://github.com/deepset-ai/haystack
+ # deepset: https://deepset.ai/