ofermend committed on
Commit
d40cd4a
1 Parent(s): 486e4f5

Update query.py

Browse files
Files changed (1) hide show
  1. query.py +125 -49
query.py CHANGED
@@ -8,31 +8,65 @@ def extract_between_tags(text, start_tag, end_tag):
8
  end_index = text.find(end_tag, start_index)
9
  return text[start_index+len(start_tag):end_index-len(end_tag)]
10
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  class VectaraQuery():
12
  def __init__(self, api_key: str, customer_id: str, corpus_ids: list[str], prompt_name: str = None):
13
  self.customer_id = customer_id
14
  self.corpus_ids = corpus_ids
15
  self.api_key = api_key
16
- self.prompt_name = prompt_name if prompt_name else "vectara-summary-ext-v1.2.0"
17
  self.conv_id = None
18
 
19
- def submit_query(self, query_str: str):
20
  corpora_key_list = [{
21
  'customer_id': self.customer_id, 'corpus_id': corpus_id, 'lexical_interpolation_config': {'lambda': 0.025}
22
  } for corpus_id in self.corpus_ids
23
  ]
24
 
25
- endpoint = f"https://api.vectara.io/v1/query"
26
- start_tag = "%START_SNIPPET%"
27
- end_tag = "%END_SNIPPET%"
28
- headers = {
29
- "Content-Type": "application/json",
30
- "Accept": "application/json",
31
- "customer-id": self.customer_id,
32
- "x-api-key": self.api_key,
33
- "grpc-timeout": "60S"
34
- }
35
- body = {
36
  'query': [
37
  {
38
  'query': query_str,
@@ -42,8 +76,8 @@ class VectaraQuery():
42
  'context_config': {
43
  'sentences_before': 2,
44
  'sentences_after': 2,
45
- 'start_tag': start_tag,
46
- 'end_tag': end_tag,
47
  },
48
  'rerankingConfig':
49
  {
@@ -61,14 +95,27 @@ class VectaraQuery():
61
  'store': True,
62
  'conversationId': self.conv_id
63
  },
64
- # 'debug': True,
65
  }
66
  ]
67
  }
68
  ]
69
  }
70
-
71
- response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=headers)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
  if response.status_code != 200:
73
  print(f"Query failed with code {response.status_code}, reason {response.reason}, text {response.text}")
74
  return "Sorry, something went wrong in my brain. Please try again later."
@@ -79,9 +126,9 @@ class VectaraQuery():
79
  summary = res['responseSet'][0]['summary'][0]['text']
80
  responses = res['responseSet'][0]['response'][:top_k]
81
  docs = res['responseSet'][0]['document']
82
- chat = res['responseSet'][0]['summary'][0]['chat']
83
 
84
- if chat['status'] != None:
85
  st_code = chat['status']
86
  print(f"Chat query failed with code {st_code}")
87
  if st_code == 'RESOURCE_EXHAUSTED':
@@ -89,34 +136,63 @@ class VectaraQuery():
89
  return 'Sorry, Vectara chat turns exceeds plan limit.'
90
  return 'Sorry, something went wrong in my brain. Please try again later.'
91
 
92
- self.conv_id = res['responseSet'][0]['summary'][0]['chat']['conversationId']
93
-
94
- pattern = r'\[\d{1,2}\]'
95
- matches = [match.span() for match in re.finditer(pattern, summary)]
96
 
97
- # figure out unique list of references
98
- refs = []
99
- for match in matches:
100
- start, end = match
101
- response_num = int(summary[start+1:end-1])
102
- doc_num = responses[response_num-1]['documentIndex']
103
- metadata = {item['name']: item['value'] for item in docs[doc_num]['metadata']}
104
- text = extract_between_tags(responses[response_num-1]['text'], start_tag, end_tag)
105
- if 'url' in metadata.keys():
106
- url = f"{metadata['url']}#:~:text={quote(text)}"
107
- if url not in refs:
108
- refs.append(url)
109
 
110
- # replace references with markdown links
111
- refs_dict = {url:(inx+1) for inx,url in enumerate(refs)}
112
- for match in reversed(matches):
113
- start, end = match
114
- response_num = int(summary[start+1:end-1])
115
- doc_num = responses[response_num-1]['documentIndex']
116
- metadata = {item['name']: item['value'] for item in docs[doc_num]['metadata']}
117
- text = extract_between_tags(responses[response_num-1]['text'], start_tag, end_tag)
118
- url = f"{metadata['url']}#:~:text={quote(text)}"
119
- citation_inx = refs_dict[url]
120
- summary = summary[:start] + f'[\[{citation_inx}\]]({url})' + summary[end:]
121
-
122
- return summary
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
def extract_between_tags(text, start_tag, end_tag):
    """Return the substring of *text* enclosed between *start_tag* and *end_tag*.

    Assumes both tags are present (callers pass Vectara snippet markers that
    always are); if a tag is missing, find() returns -1 and the slice is
    undefined garbage — NOTE(review): confirm callers guarantee the tags.
    """
    start_index = text.find(start_tag)
    end_index = text.find(end_tag, start_index)
    # Bug fix: the slice must end at end_index, not end_index - len(end_tag);
    # the old code silently dropped the last len(end_tag) characters of the
    # extracted snippet.
    return text[start_index + len(start_tag):end_index]
 
11
class CitationNormalizer():
    """Rewrite numeric citations such as ``[1]`` in a Vectara summary into
    markdown links that point at the cited document's URL, using a
    ``#:~:text=`` fragment to highlight the matched snippet.

    Citations whose document has no ``url`` metadata are removed from the
    summary instead of being linked.
    """

    def __init__(self, responses, docs):
        # responses: ranked result entries ('documentIndex', snippet-tagged 'text')
        # docs: document entries whose 'metadata' is a list of name/value pairs
        self.docs = docs
        self.responses = responses
        self.refs = []  # unique citation URLs, in order of first appearance

    def _citation_url(self, response_num):
        """Return the highlight URL for the 1-based citation *response_num*,
        or None when the cited document has no 'url' metadata.

        Extracted helper: the original duplicated this computation in both
        loops of normalize_citations, and extracted the snippet text even
        when no URL existed to use it with.
        """
        response = self.responses[response_num - 1]
        doc_num = response['documentIndex']
        metadata = {item['name']: item['value'] for item in self.docs[doc_num]['metadata']}
        if 'url' not in metadata:
            return None
        text = extract_between_tags(response['text'], "%START_SNIPPET%", "%END_SNIPPET%")
        return f"{metadata['url']}#:~:text={quote(text)}"

    def normalize_citations(self, summary):
        """Return *summary* with every ``[n]`` citation replaced by a
        markdown link (or deleted when the source has no URL)."""
        # find all references in the summary
        pattern = r'\[\d{1,2}\]'
        matches = [match.span() for match in re.finditer(pattern, summary)]

        # figure out the unique list of reference URLs
        for start, end in matches:
            url = self._citation_url(int(summary[start + 1:end - 1]))
            if url is not None and url not in self.refs:
                self.refs.append(url)

        # replace references with markdown links, walking backwards so the
        # spans recorded above remain valid while we splice in new text
        refs_dict = {url: (inx + 1) for inx, url in enumerate(self.refs)}
        for start, end in reversed(matches):
            url = self._citation_url(int(summary[start + 1:end - 1]))
            if url is not None:
                citation_inx = refs_dict[url]
                # Escape the brackets so markdown renders a literal [n].
                # Fix: the original used '\[' inside a non-raw f-string, an
                # invalid escape sequence (SyntaxWarning on Python >= 3.12);
                # '\\[' yields the same backslash-bracket output explicitly.
                summary = summary[:start] + f'[\\[{citation_inx}\\]]({url})' + summary[end:]
            else:
                summary = summary[:start] + summary[end:]

        return summary
54
+
55
class VectaraQuery():
    """Thin client for the Vectara query / chat APIs."""

    def __init__(self, api_key: str, customer_id: str, corpus_ids: list[str], prompt_name: str = None):
        # api_key / customer_id: Vectara account credentials
        # corpus_ids: corpora to search across
        # prompt_name: summarizer prompt; a default model is used when omitted
        self.customer_id = customer_id
        self.corpus_ids = corpus_ids
        self.api_key = api_key
        # Fall back to the default summarizer prompt when none was supplied.
        self.prompt_name = prompt_name or "vectara-experimental-summary-ext-2023-12-11-sml"
        # Chat conversation id; populated after the first query round-trip.
        self.conv_id = None
62
 
63
+ def get_body(self, query_str: str):
64
  corpora_key_list = [{
65
  'customer_id': self.customer_id, 'corpus_id': corpus_id, 'lexical_interpolation_config': {'lambda': 0.025}
66
  } for corpus_id in self.corpus_ids
67
  ]
68
 
69
+ return {
 
 
 
 
 
 
 
 
 
 
70
  'query': [
71
  {
72
  'query': query_str,
 
76
  'context_config': {
77
  'sentences_before': 2,
78
  'sentences_after': 2,
79
+ 'start_tag': "%START_SNIPPET%",
80
+ 'end_tag': "%END_SNIPPET%",
81
  },
82
  'rerankingConfig':
83
  {
 
95
  'store': True,
96
  'conversationId': self.conv_id
97
  },
 
98
  }
99
  ]
100
  }
101
  ]
102
  }
103
+
104
+ def get_headers(self):
105
+ return {
106
+ "Content-Type": "application/json",
107
+ "Accept": "application/json",
108
+ "customer-id": self.customer_id,
109
+ "x-api-key": self.api_key,
110
+ "grpc-timeout": "60S"
111
+ }
112
+
113
+ def submit_query(self, query_str: str):
114
+
115
+ endpoint = f"https://api.vectara.io/v1/query"
116
+ body = self.get_body(query_str)
117
+
118
+ response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=self.get_headers())
119
  if response.status_code != 200:
120
  print(f"Query failed with code {response.status_code}, reason {response.reason}, text {response.text}")
121
  return "Sorry, something went wrong in my brain. Please try again later."
 
126
  summary = res['responseSet'][0]['summary'][0]['text']
127
  responses = res['responseSet'][0]['response'][:top_k]
128
  docs = res['responseSet'][0]['document']
129
+ chat = res['responseSet'][0]['summary'][0].get('chat', None)
130
 
131
+ if chat and chat['status'] is not None:
132
  st_code = chat['status']
133
  print(f"Chat query failed with code {st_code}")
134
  if st_code == 'RESOURCE_EXHAUSTED':
 
136
  return 'Sorry, Vectara chat turns exceeds plan limit.'
137
  return 'Sorry, something went wrong in my brain. Please try again later.'
138
 
139
+ self.conv_id = chat['conversationId'] if chat else None
140
+ summary = CitationNormalizer(responses, docs).normalize_citations(summary)
141
+ return summary
 
142
 
143
+ def submit_query_streaming(self, query_str: str):
 
 
 
 
 
 
 
 
 
 
 
144
 
145
+ endpoint = f"https://api.vectara.io/v1/stream-query"
146
+ body = self.get_body(query_str)
147
+
148
+ response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=self.get_headers(), stream=True)
149
+ if response.status_code != 200:
150
+ print(f"Query failed with code {response.status_code}, reason {response.reason}, text {response.text}")
151
+ return "Sorry, something went wrong in my brain. Please try again later."
152
+
153
+ chunks = []
154
+ accumulated_text = "" # Initialize text accumulation
155
+ pattern_max_length = 50 # Example heuristic
156
+ for line in response.iter_lines():
157
+ if line: # filter out keep-alive new lines
158
+ data = json.loads(line.decode('utf-8'))
159
+ res = data['result']
160
+ response_set = res['responseSet']
161
+ if response_set is None:
162
+ # grab next chunk and yield it as output
163
+ summary = res.get('summary', None)
164
+ if summary is None or len(summary)==0:
165
+ continue
166
+ else:
167
+ chat = summary.get('chat', None)
168
+ if chat and chat.get('status', None):
169
+ st_code = chat['status']
170
+ print(f"Chat query failed with code {st_code}")
171
+ if st_code == 'RESOURCE_EXHAUSTED':
172
+ self.conv_id = None
173
+ return 'Sorry, Vectara chat turns exceeds plan limit.'
174
+ return 'Sorry, something went wrong in my brain. Please try again later.'
175
+ conv_id = chat.get('conversationId', None) if chat else None
176
+ if conv_id:
177
+ self.conv_id = conv_id
178
+
179
+ chunk = summary['text']
180
+ accumulated_text += chunk # Append current chunk to accumulation
181
+ if len(accumulated_text) > pattern_max_length:
182
+ accumulated_text = re.sub(r"\[\d+\]", "", accumulated_text)
183
+ accumulated_text = re.sub(r"\s+\.", ".", accumulated_text)
184
+ out_chunk = accumulated_text[:-pattern_max_length]
185
+ chunks.append(out_chunk)
186
+ yield out_chunk
187
+ accumulated_text = accumulated_text[-pattern_max_length:]
188
+
189
+ if summary['done']:
190
+ break
191
+
192
+ # yield the last piece
193
+ if len(accumulated_text) > 0:
194
+ accumulated_text = re.sub(r" \[\d+\]\.", ".", accumulated_text)
195
+ chunks.append(accumulated_text)
196
+ yield accumulated_text
197
+
198
+ return ''.join(chunks)