Liyan06 committed
Commit · 9695d05
Parent(s): 1de34ec

add server-sent events (SSE) function
handler.py CHANGED (+19 -12)
@@ -1,5 +1,6 @@
 from minicheck_web.minicheck import MiniCheck
 from web_retrieval import *
+import json
 
 
 def sort_chunks_single_doc_claim(used_chunk, support_prob_per_chunk):
@@ -39,32 +40,38 @@ class EndpointHandler():
         assert len(data['inputs']['claims']) == 1, "Only one claim is allowed for web retrieval for the current version."
 
         claim = data['inputs']['claims'][0]
-
-
-
-
-
-
-
+        progress_generator = self.search_relevant_docs(claim)
+
+        def generate_sse():
+            for progress in progress_generator:
+                yield f"data: {progress}\n"
+            ranked_docs, scores, ranked_urls = progress_generator.send(None)
+            outputs = {
+                'ranked_docs': ranked_docs,
+                'scores': scores,
+                'ranked_urls': ranked_urls
+            }
+            yield f"data: {json.dumps(outputs)}\n"
 
-        return
+        return generate_sse()
+
 
 
     def search_relevant_docs(self, claim, timeout=10, max_search_results_per_query=5, allow_duplicated_urls=False):
 
         search_results = search_google(claim, timeout=timeout)
 
-
+        yield 'Searching webpages...'
         start = time()
         with concurrent.futures.ThreadPoolExecutor() as e:
             scraped_results = e.map(scrape_url, search_results, itertools.repeat(timeout))
         end = time()
-
+        yield f"Finished searching in {round((end - start), 1)} seconds."
         scraped_results = [(r[0][:50000], r[1]) for r in scraped_results if r[0] and '��' not in r[0] and ".pdf" not in r[1]]
 
         retrieved_docs, urls = zip(*scraped_results[:max_search_results_per_query])
 
-
+        yield 'Scoring webpages...'
         start = time()
         retrieved_data = {
             'inputs': {
@@ -75,7 +82,7 @@ class EndpointHandler():
         _, _, used_chunk, support_prob_per_chunk = self.scorer.score(data=retrieved_data)
         end = time()
         num_chunks = len([item for items in used_chunk for item in items])
-
+        yield f'Finished {num_chunks} entailment checks in {round((end - start), 1)} seconds ({round(num_chunks / (end - start) * 60)} Doc./min).'
 
         ranked_docs, scores, ranked_urls = order_doc_score_url(used_chunk, support_prob_per_chunk, urls, allow_duplicated_urls=allow_duplicated_urls)
 
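With this change, search_relevant_docs becomes a generator (the new yield statements), and the handler wraps it in generate_sse() so that progress messages stream back to the caller as "data:" frames, followed by one final frame carrying the JSON-encoded results. The sketch below is a minimal, self-contained analogue of that pattern, not the handler's own code: the worker function and its fake results are illustrative only, the final tuple is passed along as the last yielded item rather than via send(None), and each event is terminated with a blank line per the usual SSE convention.

import json
from typing import Iterator, Tuple, Union

# Illustrative worker: yields progress strings, then the final result tuple.
def fake_retrieval() -> Iterator[Union[str, Tuple[list, list, list]]]:
    yield "Searching webpages..."
    yield "Scoring webpages..."
    yield (["doc a", "doc b"], [0.91, 0.42], ["https://a.example", "https://b.example"])

def sse_stream() -> Iterator[str]:
    result = None
    for item in fake_retrieval():
        if isinstance(item, str):
            # Progress frame; a blank line terminates each SSE event.
            yield f"data: {item}\n\n"
        else:
            # Keep the final result tuple for the closing frame.
            result = item
    ranked_docs, scores, ranked_urls = result
    outputs = {"ranked_docs": ranked_docs, "scores": scores, "ranked_urls": ranked_urls}
    yield f"data: {json.dumps(outputs)}\n\n"

if __name__ == "__main__":
    for frame in sse_stream():
        print(frame, end="")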
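On the caller's side, the response now has to be read as a stream rather than a single JSON body. Below is one way a client might consume it with requests; ENDPOINT_URL is a placeholder, the Accept header and the {'inputs': {'claims': [...]}} payload shape are assumptions based on the assert in the handler, and any auth headers depend on how the endpoint is deployed. Intermediate frames are plain progress strings; the last frame is the JSON object built in generate_sse.

import json
import requests

ENDPOINT_URL = "https://<your-endpoint>/"    # placeholder: depends on the deployment
HEADERS = {"Accept": "text/event-stream"}    # assumption; add auth headers if required

payload = {"inputs": {"claims": ["The Eiffel Tower is in Paris."]}}

final_result = None
with requests.post(ENDPOINT_URL, json=payload, headers=HEADERS, stream=True) as resp:
    resp.raise_for_status()
    for raw_line in resp.iter_lines(decode_unicode=True):
        if not raw_line or not raw_line.startswith("data: "):
            continue                          # skip blanks and non-data lines
        body = raw_line[len("data: "):]
        try:
            # The final frame is a JSON object with ranked_docs / scores / ranked_urls.
            final_result = json.loads(body)
        except json.JSONDecodeError:
            # Intermediate frames are plain progress strings.
            print("progress:", body)

if final_result is not None:
    print(final_result["ranked_urls"], final_result["scores"])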