KevinHuSh committed
Commit c5ea37c · 1 Parent(s): eb8254e

Add resume parser and fix bugs (#59)


* Update .gitignore

* Update .gitignore

* Add resume parser and fix bugs

.gitignore CHANGED
@@ -3,6 +3,10 @@
 debug/
 target/
 __pycache__/
+hudet/
+cv/
+layout_app.py
+resume/
 
 # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
 # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
api/apps/chunk_app.py CHANGED
@@ -47,17 +47,20 @@ def list():
     tenant_id = DocumentService.get_tenant_id(req["doc_id"])
     if not tenant_id:
         return get_data_error_result(retmsg="Tenant not found!")
+    e, doc = DocumentService.get_by_id(doc_id)
+    if not e:
+        return get_data_error_result(retmsg="Document not found!")
     query = {
         "doc_ids": [doc_id], "page": page, "size": size, "question": question
     }
     if "available_int" in req:
         query["available_int"] = int(req["available_int"])
     sres = retrievaler.search(query, search.index_name(tenant_id))
-    res = {"total": sres.total, "chunks": []}
+    res = {"total": sres.total, "chunks": [], "doc": doc.to_dict()}
     for id in sres.ids:
         d = {
             "chunk_id": id,
-            "content_with_weight": rmSpace(sres.highlight[id]) if question else sres.field[id]["content_with_weight"],
+            "content_with_weight": rmSpace(sres.highlight[id]) if question else sres.field[id].get("content_with_weight", ""),
             "doc_id": sres.field[id]["doc_id"],
             "docnm_kwd": sres.field[id]["docnm_kwd"],
             "important_kwd": sres.field[id].get("important_kwd", []),
@@ -110,7 +113,7 @@ def get():
                   "important_kwd")
 def set():
     req = request.json
-    d = {"id": req["chunk_id"]}
+    d = {"id": req["chunk_id"], "content_with_weight": req["content_with_weight"]}
     d["content_ltks"] = huqie.qie(req["content_with_weight"])
     d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
     d["important_kwd"] = req["important_kwd"]
@@ -181,11 +184,12 @@ def create():
     md5 = hashlib.md5()
     md5.update((req["content_with_weight"] + req["doc_id"]).encode("utf-8"))
     chunck_id = md5.hexdigest()
-    d = {"id": chunck_id, "content_ltks": huqie.qie(req["content_with_weight"])}
+    d = {"id": chunck_id, "content_ltks": huqie.qie(req["content_with_weight"]), "content_with_weight": req["content_with_weight"]}
     d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
     d["important_kwd"] = req.get("important_kwd", [])
     d["important_tks"] = huqie.qie(" ".join(req.get("important_kwd", [])))
     d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
+    d["create_timestamp_flt"] = datetime.datetime.now().timestamp()
 
     try:
         e, doc = DocumentService.get_by_id(req["doc_id"])
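
Note: besides returning the owning document alongside the chunks, set() and create() now persist content_with_weight on the chunk itself, and create() stamps a numeric create_timestamp_flt used by the new sort in rag/nlp/search.py. An abridged sketch of the chunk document create() builds (values are illustrative; the huqie token fields are omitted here):

import datetime
import hashlib

req = {"doc_id": "d0c1d", "content_with_weight": "Revenue grew 12% in Q3."}
chunck_id = hashlib.md5((req["content_with_weight"] + req["doc_id"]).encode("utf-8")).hexdigest()
d = {
    "id": chunck_id,
    "content_with_weight": req["content_with_weight"],                  # now stored verbatim
    "create_time": str(datetime.datetime.now()).replace("T", " ")[:19],
    "create_timestamp_flt": datetime.datetime.now().timestamp(),        # numeric sort key
}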
api/apps/conversation_app.py CHANGED
@@ -13,16 +13,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import re
+
 from flask import request
 from flask_login import login_required
 from api.db.services.dialog_service import DialogService, ConversationService
 from api.db import LLMType
-from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
+from api.db.services.knowledgebase_service import KnowledgebaseService
+from api.db.services.llm_service import LLMService, LLMBundle
+from api.settings import access_logger
 from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
 from api.utils import get_uuid
 from api.utils.api_utils import get_json_result
 from rag.llm import ChatModel
 from rag.nlp import retrievaler
+from rag.nlp.search import index_name
 from rag.utils import num_tokens_from_string, encoder
 
 
@@ -163,6 +168,17 @@ def chat(dialog, messages, **kwargs):
     if not llm:
         raise LookupError("LLM(%s) not found"%dialog.llm_id)
     llm = llm[0]
+    question = messages[-1]["content"]
+    embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING)
+    chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
+
+    field_map = KnowledgebaseService.get_field_map(dialog.kb_ids)
+    ## try to use sql if field mapping is good to go
+    if field_map:
+        markdown_tbl, chunks = use_sql(question, field_map, dialog.tenant_id, chat_mdl)
+        if markdown_tbl:
+            return {"answer": markdown_tbl, "retrieval": {"chunks": chunks}}
+
     prompt_config = dialog.prompt_config
     for p in prompt_config["parameters"]:
         if p["key"] == "knowledge":continue
@@ -170,9 +186,6 @@ def chat(dialog, messages, **kwargs):
         if p["key"] not in kwargs:
             prompt_config["system"] = prompt_config["system"].replace("{%s}"%p["key"], " ")
 
-    question = messages[-1]["content"]
-    embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING)
-    chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
     kbinfos = retrievaler.retrieval(question, embd_mdl, dialog.tenant_id, dialog.kb_ids, 1, dialog.top_n, dialog.similarity_threshold,
                         dialog.vector_similarity_weight, top=1024, aggs=False)
     knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
@@ -196,4 +209,46 @@ def chat(dialog, messages, **kwargs):
                         vtweight=dialog.vector_similarity_weight)
     for c in kbinfos["chunks"]:
         if c.get("vector"):del c["vector"]
-    return {"answer": answer, "retrieval": kbinfos}
+    return {"answer": answer, "retrieval": kbinfos}
+
+
+def use_sql(question, field_map, tenant_id, chat_mdl):
+    sys_prompt = "你是一个DBA。你需要这对以下表的字段结构,根据我的问题写出sql。"
+    user_promt = """
+表名:{};
+数据库表字段说明如下:
+{}
+
+问题:{}
+请写出SQL。
+""".format(
+        index_name(tenant_id),
+        "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
+        question
+    )
+    sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_promt}], {"temperature": 0.1})
+    sql = re.sub(r".*?select ", "select ", sql, flags=re.IGNORECASE)
+    sql = re.sub(r" +", " ", sql)
+    if sql[:len("select ")].lower() != "select ":
+        return None, None
+    if sql[:len("select *")].lower() != "select *":
+        sql = "select doc_id,docnm_kwd," + sql[6:]
+
+    tbl = retrievaler.sql_retrieval(sql)
+    if not tbl: return None, None
+
+    docid_idx = set([ii for ii, c in enumerate(tbl["columns"]) if c["name"] == "doc_id"])
+    docnm_idx = set([ii for ii, c in enumerate(tbl["columns"]) if c["name"] == "docnm_kwd"])
+    clmn_idx = [ii for ii in range(len(tbl["columns"])) if ii not in (docid_idx | docnm_idx)]
+
+    clmns = "|".join([re.sub(r"/.*", "", field_map.get(tbl["columns"][i]["name"], f"C{i}")) for i in clmn_idx]) + "|原文"
+    line = "|".join(["------" for _ in range(len(clmn_idx))]) + "|------"
+    rows = ["|".join([str(r[i]) for i in clmn_idx]) + "|" for r in tbl["rows"]]
+    if not docid_idx or not docnm_idx:
+        access_logger.error("SQL missing field: " + sql)
+        return "\n".join([clmns, line, "\n".join(rows)]), []
+
+    rows = "\n".join([r + f"##{ii}$$" for ii, r in enumerate(rows)])
+    docid_idx = list(docid_idx)[0]
+    docnm_idx = list(docnm_idx)[0]
+    return "\n".join([clmns, line, rows]), [{"doc_id": r[docid_idx], "docnm_kwd": r[docnm_idx]} for r in tbl["rows"]]
api/apps/document_app.py CHANGED
@@ -21,9 +21,6 @@ import flask
 from elasticsearch_dsl import Q
 from flask import request
 from flask_login import login_required, current_user
-
-from api.db.db_models import Task
-from api.db.services.task_service import TaskService
 from rag.nlp import search
 from rag.utils import ELASTICSEARCH
 from api.db.services import duplicate_name
@@ -35,7 +32,7 @@ from api.db.services.document_service import DocumentService
 from api.settings import RetCode
 from api.utils.api_utils import get_json_result
 from rag.utils.minio_conn import MINIO
-from api.utils.file_utils import filename_type
+from api.utils.file_utils import filename_type, thumbnail
 
 
 @manager.route('/upload', methods=['POST'])
@@ -78,7 +75,8 @@ def upload():
             "type": filename_type(filename),
             "name": filename,
             "location": location,
-            "size": len(blob)
+            "size": len(blob),
+            "thumbnail": thumbnail(filename, blob)
         })
         return get_json_result(data=doc.to_json())
     except Exception as e:
api/db/db_models.py CHANGED
@@ -474,7 +474,7 @@ class Knowledgebase(DataBaseModel):
     vector_similarity_weight = FloatField(default=0.3)
 
     parser_id = CharField(max_length=32, null=False, help_text="default parser ID", default=ParserType.GENERAL.value)
-    parser_config = JSONField(null=False, default={"from_page":0, "to_page": 100000})
+    parser_config = JSONField(null=False, default={"pages":[[0,1000000]]})
     status = CharField(max_length=1, null=True, help_text="is it validate(0: wasted,1: validate)", default="1")
 
     def __str__(self):
@@ -489,7 +489,7 @@ class Document(DataBaseModel):
     thumbnail = TextField(null=True, help_text="thumbnail base64 string")
     kb_id = CharField(max_length=256, null=False, index=True)
     parser_id = CharField(max_length=32, null=False, help_text="default parser ID")
-    parser_config = JSONField(null=False, default={"from_page":0, "to_page": 100000})
+    parser_config = JSONField(null=False, default={"pages":[[0,1000000]]})
     source_type = CharField(max_length=128, null=False, default="local", help_text="where dose this document from")
     type = CharField(max_length=32, null=False, help_text="file extension")
     created_by = CharField(max_length=32, null=False, help_text="who created it")
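
Note: parser_config moves from a single from_page/to_page pair to a list of [start, end) page ranges, which is what rag/svr/task_broker.py now iterates over. For reference (the override below is a hypothetical example, not part of the commit):

old_default = {"from_page": 0, "to_page": 100000}
new_default = {"pages": [[0, 1000000]]}
custom = {"pages": [[0, 10], [30, 50]]}   # e.g. parse only two slices of a PDF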
api/db/services/dialog_service.py CHANGED
@@ -21,5 +21,6 @@ class DialogService(CommonService):
     model = Dialog
 
 
+
 class ConversationService(CommonService):
     model = Conversation
api/db/services/knowledgebase_service.py CHANGED
@@ -63,3 +63,31 @@ class KnowledgebaseService(CommonService):
         d = kbs[0].to_dict()
         d["embd_id"] = kbs[0].tenant.embd_id
         return d
+
+    @classmethod
+    @DB.connection_context()
+    def update_parser_config(cls, id, config):
+        e, m = cls.get_by_id(id)
+        if not e:raise LookupError(f"knowledgebase({id}) not found.")
+        def dfs_update(old, new):
+            for k,v in new.items():
+                if k not in old:
+                    old[k] = v
+                    continue
+                if isinstance(v, dict):
+                    assert isinstance(old[k], dict)
+                    dfs_update(old[k], v)
+                else: old[k] = v
+        dfs_update(m.parser_config, config)
+        cls.update_by_id(id, m.parser_config)
+
+
+    @classmethod
+    @DB.connection_context()
+    def get_field_map(cls, ids):
+        conf = {}
+        for k in cls.get_by_ids(ids):
+            if k.parser_config and "field_map" in k.parser_config:
+                conf.update(k.parser_config)
+        return conf
+
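
Note: update_parser_config() merges the incoming config into the stored parser_config recursively instead of overwriting it, so table.py and resume.py can add a field_map without clobbering the pages setting. A standalone illustration of the dfs_update() merge semantics on plain dicts:

def dfs_update(old, new):
    # Recursively merge `new` into `old`: nested dicts are merged, scalars overwritten.
    for k, v in new.items():
        if k not in old:
            old[k] = v
            continue
        if isinstance(v, dict):
            assert isinstance(old[k], dict)
            dfs_update(old[k], v)
        else:
            old[k] = v

stored = {"pages": [[0, 1000000]], "field_map": {"age_int": "年龄"}}
dfs_update(stored, {"field_map": {"name_kwd": "姓名"}})
# stored == {"pages": [[0, 1000000]], "field_map": {"age_int": "年龄", "name_kwd": "姓名"}}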
api/utils/file_utils.py CHANGED
@@ -13,11 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-
+import base64
 import json
 import os
 import re
+from io import BytesIO
 
+import fitz
+from PIL import Image
 from cachetools import LRUCache, cached
 from ruamel.yaml import YAML
 
@@ -150,4 +153,33 @@ def filename_type(filename):
         return FileType.AURAL.value
 
     if re.match(r".*\.(jpg|jpeg|png|tif|gif|pcx|tga|exif|fpx|svg|psd|cdr|pcd|dxf|ufo|eps|ai|raw|WMF|webp|avif|apng|icon|ico|mpg|mpeg|avi|rm|rmvb|mov|wmv|asf|dat|asx|wvx|mpe|mpa|mp4)$", filename):
-        return FileType.VISUAL
+        return FileType.VISUAL
+
+
+def thumbnail(filename, blob):
+    filename = filename.lower()
+    if re.match(r".*\.pdf$", filename):
+        pdf = fitz.open(stream=blob, filetype="pdf")
+        pix = pdf[0].get_pixmap(matrix=fitz.Matrix(0.03, 0.03))
+        buffered = BytesIO()
+        Image.frombytes("RGB", [pix.width, pix.height],
+                        pix.samples).save(buffered, format="png")
+        return "data:image/png;base64," + base64.b64encode(buffered.getvalue())
+
+    if re.match(r".*\.(jpg|jpeg|png|tif|gif|icon|ico|webp)$", filename):
+        return ("data:image/%s;base64,"%filename.split(".")[-1]) + base64.b64encode(Image.open(BytesIO(blob)).thumbnail((30, 30)).tobytes())
+
+    if re.match(r".*\.(ppt|pptx)$", filename):
+        import aspose.slides as slides
+        import aspose.pydrawing as drawing
+        try:
+            with slides.Presentation(BytesIO(blob)) as presentation:
+                buffered = BytesIO()
+                presentation.slides[0].get_thumbnail(0.03, 0.03).save(buffered, drawing.imaging.ImageFormat.png)
+                return "data:image/png;base64," + base64.b64encode(buffered.getvalue())
+        except Exception as e:
+            pass
+
+
+
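
Note: thumbnail() renders a small preview and returns it as a data URI (PyMuPDF for PDFs, Pillow for images, Aspose.Slides for ppt/pptx). As committed, the string concatenation uses the raw bytes from base64.b64encode(), and the image branch calls .thumbnail() (which returns None) before .tobytes(); both would need small fixes to yield a usable string. A hedged sketch of the PDF branch with an explicit decode (the decode step is my addition, not in the diff):

import base64
from io import BytesIO

import fitz  # PyMuPDF
from PIL import Image

def pdf_thumbnail_data_uri(blob: bytes) -> str:
    pdf = fitz.open(stream=blob, filetype="pdf")
    pix = pdf[0].get_pixmap(matrix=fitz.Matrix(0.03, 0.03))   # ~3% scale render of page 1
    buffered = BytesIO()
    Image.frombytes("RGB", [pix.width, pix.height], pix.samples).save(buffered, format="png")
    return "data:image/png;base64," + base64.b64encode(buffered.getvalue()).decode("utf-8")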
rag/app/paper.py CHANGED
@@ -3,7 +3,6 @@ import re
 from collections import Counter
 
 from api.db import ParserType
-from rag.cv.ppdetection import PPDet
 from rag.parser import tokenize
 from rag.nlp import huqie
 from rag.parser.pdf_parser import HuParser
rag/app/resume.py ADDED
@@ -0,0 +1,102 @@
+import copy
+import json
+import os
+import re
+import requests
+from api.db.services.knowledgebase_service import KnowledgebaseService
+from rag.nlp import huqie
+
+from rag.settings import cron_logger
+from rag.utils import rmSpace
+
+
+def chunk(filename, binary=None, callback=None, **kwargs):
+    if not re.search(r"\.(pdf|doc|docx|txt)$", filename, flags=re.IGNORECASE): raise NotImplementedError("file type not supported yet(pdf supported)")
+
+    url = os.environ.get("INFINIFLOW_SERVER")
+    if not url:raise EnvironmentError("Please set environment variable: 'INFINIFLOW_SERVER'")
+    token = os.environ.get("INFINIFLOW_TOKEN")
+    if not token:raise EnvironmentError("Please set environment variable: 'INFINIFLOW_TOKEN'")
+
+    if not binary:
+        with open(filename, "rb") as f: binary = f.read()
+    def remote_call():
+        nonlocal filename, binary
+        for _ in range(3):
+            try:
+                res = requests.post(url + "/v1/layout/resume/", files=[(filename, binary)],
+                                    headers={"Authorization": token}, timeout=180)
+                res = res.json()
+                if res["retcode"] != 0: raise RuntimeError(res["retmsg"])
+                return res["data"]
+            except RuntimeError as e:
+                raise e
+            except Exception as e:
+                cron_logger.error("resume parsing:" + str(e))
+
+    resume = remote_call()
+    print(json.dumps(resume, ensure_ascii=False, indent=2))
+
+    field_map = {
+        "name_kwd": "姓名/名字",
+        "gender_kwd": "性别(男,女)",
+        "age_int": "年龄/岁/年纪",
+        "phone_kwd": "电话/手机/微信",
+        "email_tks": "email/e-mail/邮箱",
+        "position_name_tks": "职位/职能/岗位/职责",
+        "expect_position_name_tks": "期望职位/期望职能/期望岗位",
+
+        "hightest_degree_kwd": "最高学历(高中,职高,硕士,本科,博士,初中,中技,中专,专科,专升本,MPA,MBA,EMBA)",
+        "first_degree_kwd": "第一学历(高中,职高,硕士,本科,博士,初中,中技,中专,专科,专升本,MPA,MBA,EMBA)",
+        "first_major_tks": "第一学历专业",
+        "first_school_name_tks": "第一学历毕业学校",
+        "edu_first_fea_kwd": "第一学历标签(211,留学,双一流,985,海外知名,重点大学,中专,专升本,专科,本科,大专)",
+
+        "degree_kwd": "过往学历(高中,职高,硕士,本科,博士,初中,中技,中专,专科,专升本,MPA,MBA,EMBA)",
+        "major_tks": "学过的专业/过往专业",
+        "school_name_tks": "学校/毕业院校",
+        "sch_rank_kwd": "学校标签(顶尖学校,精英学校,优质学校,一般学校)",
+        "edu_fea_kwd": "教育标签(211,留学,双一流,985,海外知名,重点大学,中专,专升本,专科,本科,大专)",
+
+        "work_exp_flt": "工作年限/工作年份/N年经验/毕业了多少年",
+        "birth_dt": "生日/出生年份",
+        "corp_nm_tks": "就职过的公司/之前的公司/上过班的公司",
+        "corporation_name_tks": "最近就职(上班)的公司/上一家公司",
+        "edu_end_int": "毕业年份",
+        "expect_city_names_tks": "期望城市",
+        "industry_name_tks": "所在行业"
+    }
+    titles = []
+    for n in ["name_kwd", "gender_kwd", "position_name_tks", "age_int"]:
+        v = resume.get(n, "")
+        if isinstance(v, list):v = v[0]
+        if n.find("tks") > 0: v = rmSpace(v)
+        titles.append(str(v))
+    doc = {
+        "docnm_kwd": filename,
+        "title_tks": huqie.qie("-".join(titles)+"-简历")
+    }
+    doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
+    pairs = []
+    for n,m in field_map.items():
+        if not resume.get(n):continue
+        v = resume[n]
+        if isinstance(v, list):v = " ".join(v)
+        if n.find("tks") > 0: v = rmSpace(v)
+        pairs.append((m, str(v)))
+
+    doc["content_with_weight"] = "\n".join(["{}: {}".format(re.sub(r"([^()]+)", "", k), v) for k,v in pairs])
+    doc["content_ltks"] = huqie.qie(doc["content_with_weight"])
+    doc["content_sm_ltks"] = huqie.qieqie(doc["content_ltks"])
+    for n, _ in field_map.items(): doc[n] = resume[n]
+
+    print(doc)
+    KnowledgebaseService.update_parser_config(kwargs["kb_id"], {"field_map": field_map})
+    return [doc]
+
+
+if __name__ == "__main__":
+    import sys
+    def dummy(a, b):
+        pass
+    chunk(sys.argv[1], callback=dummy)
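
Note: the resume parser delegates field extraction to a remote service addressed by INFINIFLOW_SERVER/INFINIFLOW_TOKEN, flattens the returned fields into one searchable document, and registers field_map on the knowledgebase so the SQL path in conversation_app.py can later query those fields. A hypothetical invocation (URL, token and kb_id are placeholders):

import os

os.environ["INFINIFLOW_SERVER"] = "http://localhost:8000"   # placeholder
os.environ["INFINIFLOW_TOKEN"] = "my-token"                  # placeholder

from rag.app import resume

def progress(prog, msg):
    print(prog, msg)

docs = resume.chunk("/path/to/resume.pdf", callback=progress, kb_id="kb-123")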
rag/app/table.py CHANGED
@@ -1,13 +1,13 @@
 import copy
-import random
 import re
 from io import BytesIO
 from xpinyin import Pinyin
 import numpy as np
 import pandas as pd
-from nltk import word_tokenize
 from openpyxl import load_workbook
 from dateutil.parser import parse as datetime_parse
+
+from api.db.services.knowledgebase_service import KnowledgebaseService
 from rag.parser import is_english, tokenize
 from rag.nlp import huqie, stemmer
 
@@ -27,18 +27,19 @@ class Excel(object):
             ws = wb[sheetname]
             rows = list(ws.rows)
             headers = [cell.value for cell in rows[0]]
-            missed = set([i for i,h in enumerate(headers) if h is None])
-            headers = [cell.value for i,cell in enumerate(rows[0]) if i not in missed]
+            missed = set([i for i, h in enumerate(headers) if h is None])
+            headers = [cell.value for i, cell in enumerate(rows[0]) if i not in missed]
             data = []
             for i, r in enumerate(rows[1:]):
-                row = [cell.value for ii,cell in enumerate(r) if ii not in missed]
+                row = [cell.value for ii, cell in enumerate(r) if ii not in missed]
                 if len(row) != len(headers):
                     fails.append(str(i))
                     continue
                 data.append(row)
                 done += 1
                 if done % 999 == 0:
-                    callback(done * 0.6/total, ("Extract records: {}".format(len(res)) + (f"{len(fails)} failure({sheetname}), line: %s..."%(",".join(fails[:3])) if fails else "")))
+                    callback(done * 0.6 / total, ("Extract records: {}".format(len(res)) + (
+                        f"{len(fails)} failure({sheetname}), line: %s..." % (",".join(fails[:3])) if fails else "")))
             res.append(pd.DataFrame(np.array(data), columns=headers))
 
         callback(0.6, ("Extract records: {}. ".format(done) + (
@@ -61,9 +62,10 @@ def trans_bool(s):
 def column_data_type(arr):
     uni = len(set([a for a in arr if a is not None]))
     counts = {"int": 0, "float": 0, "text": 0, "datetime": 0, "bool": 0}
-    trans = {t:f for f,t in [(int, "int"), (float, "float"), (trans_datatime, "datetime"), (trans_bool, "bool"), (str, "text")]}
+    trans = {t: f for f, t in
+             [(int, "int"), (float, "float"), (trans_datatime, "datetime"), (trans_bool, "bool"), (str, "text")]}
     for a in arr:
-        if a is None:continue
+        if a is None: continue
         if re.match(r"[+-]?[0-9]+(\.0+)?$", str(a).replace("%%", "")):
             counts["int"] += 1
         elif re.match(r"[+-]?[0-9.]+$", str(a).replace("%%", "")):
@@ -72,17 +74,18 @@ def column_data_type(arr):
             counts["bool"] += 1
         elif trans_datatime(str(a)):
             counts["datetime"] += 1
-        else: counts["text"] += 1
-    counts = sorted(counts.items(), key=lambda x: x[1]*-1)
+        else:
+            counts["text"] += 1
+    counts = sorted(counts.items(), key=lambda x: x[1] * -1)
     ty = counts[0][0]
     for i in range(len(arr)):
-        if arr[i] is None:continue
+        if arr[i] is None: continue
         try:
             arr[i] = trans[ty](str(arr[i]))
         except Exception as e:
            arr[i] = None
     if ty == "text":
-        if len(arr) > 128 and uni/len(arr) < 0.1:
+        if len(arr) > 128 and uni / len(arr) < 0.1:
             ty = "keyword"
     return arr, ty
 
@@ -123,48 +126,51 @@ def chunk(filename, binary=None, callback=None, **kwargs):
 
         dfs = [pd.DataFrame(np.array(rows), columns=headers)]
 
-    else: raise NotImplementedError("file type not supported yet(excel, text, csv supported)")
+    else:
+        raise NotImplementedError("file type not supported yet(excel, text, csv supported)")
 
     res = []
     PY = Pinyin()
     fieds_map = {"text": "_tks", "int": "_int", "keyword": "_kwd", "float": "_flt", "datetime": "_dt", "bool": "_kwd"}
     for df in dfs:
         for n in ["id", "_id", "index", "idx"]:
-            if n in df.columns:del df[n]
+            if n in df.columns: del df[n]
         clmns = df.columns.values
         txts = list(copy.deepcopy(clmns))
         py_clmns = [PY.get_pinyins(n)[0].replace("-", "_") for n in clmns]
         clmn_tys = []
         for j in range(len(clmns)):
-            cln,ty = column_data_type(df[clmns[j]])
+            cln, ty = column_data_type(df[clmns[j]])
            clmn_tys.append(ty)
            df[clmns[j]] = cln
            if ty == "text": txts.extend([str(c) for c in cln if c])
        clmns_map = [(py_clmns[j] + fieds_map[clmn_tys[j]], clmns[j]) for i in range(len(clmns))]
-        # TODO: set this column map to KB parser configuration
 
         eng = is_english(txts)
-        for ii,row in df.iterrows():
+        for ii, row in df.iterrows():
             d = {}
             row_txt = []
             for j in range(len(clmns)):
-                if row[clmns[j]] is None:continue
+                if row[clmns[j]] is None: continue
                 fld = clmns_map[j][0]
                 d[fld] = row[clmns[j]] if clmn_tys[j] != "text" else huqie.qie(row[clmns[j]])
                 row_txt.append("{}:{}".format(clmns[j], row[clmns[j]]))
-            if not row_txt:continue
+            if not row_txt: continue
             tokenize(d, "; ".join(row_txt), eng)
-            print(d)
             res.append(d)
+
+    KnowledgebaseService.update_parser_config(kwargs["kb_id"], {"field_map": {k: v for k, v in clmns_map}})
     callback(0.6, "")
 
     return res
 
 
-
-if __name__== "__main__":
     import sys
+
+
     def dummy(a, b):
         pass
-    chunk(sys.argv[1], callback=dummy)
 
+if __name__ == "__main__":
+    import sys
+
+
+    def dummy(a, b):
        pass
+
+
+    chunk(sys.argv[1], callback=dummy)
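
Note: the old TODO is resolved — chunk() now writes the generated column map into the knowledgebase's parser_config. Each column becomes a pinyin-ised field name plus a type suffix from fieds_map. A small illustration of the mapping (the pinyin strings are shown as fixed values rather than produced by xpinyin):

fieds_map = {"text": "_tks", "int": "_int", "keyword": "_kwd",
             "float": "_flt", "datetime": "_dt", "bool": "_kwd"}

columns = [("姓名", "text"), ("年龄", "int"), ("入职日期", "datetime")]
pinyin = {"姓名": "xing_ming", "年龄": "nian_ling", "入职日期": "ru_zhi_ri_qi"}

field_map = {pinyin[name] + fieds_map[ty]: name for name, ty in columns}
# {'xing_ming_tks': '姓名', 'nian_ling_int': '年龄', 'ru_zhi_ri_qi_dt': '入职日期'}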
rag/nlp/search.py CHANGED
@@ -74,7 +74,9 @@
             s = s.highlight("title_ltks")
         if not qst:
             s = s.sort(
-                {"create_time": {"order": "desc", "unmapped_type": "date"}})
+                {"create_time": {"order": "desc", "unmapped_type": "date"}},
+                {"create_timestamp_flt": {"order": "desc", "unmapped_type": "float"}}
+            )
 
         if qst:
             s = s.highlight_options(
@@ -298,3 +300,22 @@
                 ranks["doc_aggs"][dnm] += 1
 
         return ranks
+
+    def sql_retrieval(self, sql, fetch_size=128):
+        sql = re.sub(r"[ ]+", " ", sql)
+        replaces = []
+        for r in re.finditer(r" ([a-z_]+_l?tks like |[a-z_]+_l?tks ?= ?)'([^']+)'", sql):
+            fld, v = r.group(1), r.group(2)
+            fld = re.sub(r" ?(like|=)$", "", fld).lower()
+            if v[0] == "%%": v = v[1:-1]
+            match = " MATCH({}, '{}', 'operator=OR;fuzziness=AUTO:1,3;minimum_should_match=30%') ".format(fld, huqie.qie(v))
+            replaces.append((r.group(1)+r.group(2), match))
+
+        for p, r in replaces: sql.replace(p, r)
+
+        try:
+            tbl = self.es.sql(sql, fetch_size)
+            return tbl
+        except Exception as e:
+            es_logger(f"SQL failure: {sql} =>" + str(e))
+
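
Note: sql_retrieval() rewrites equality/LIKE predicates on tokenised *_tks / *_ltks fields into Elasticsearch full-text MATCH() calls before hitting the SQL endpoint. As committed, sql.replace(p, r) discards its result, so the rewritten query is not actually applied; the sketch below shows the intended substitution with the reassignment in place (a plain no-op tokenizer stands in for huqie.qie):

import re

def rewrite_match(sql: str, tokenize=lambda s: s) -> str:
    def repl(m):
        return " MATCH({}, '{}', 'operator=OR;fuzziness=AUTO:1,3;minimum_should_match=30%')".format(
            m.group(1), tokenize(m.group(2)))
    return re.sub(r" ([a-z_]+_l?tks) (?:like|=) ?'([^']+)'", repl, sql)

print(rewrite_match("select doc_id,docnm_kwd from t where position_name_tks like '算法工程师'"))
# select doc_id,docnm_kwd from t where MATCH(position_name_tks, '算法工程师', ...)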
rag/nlp/surname.py ADDED
@@ -0,0 +1,127 @@
+#-*- coding: utf-8 -*-
+m = set(["赵","钱","孙","李",
+"周","吴","郑","王",
+"冯","陈","褚","卫",
+"蒋","沈","韩","杨",
+"朱","秦","尤","许",
+"何","吕","施","张",
+"孔","曹","严","华",
+"金","魏","陶","姜",
+"戚","谢","邹","喻",
+"柏","水","窦","章",
+"云","苏","潘","葛",
+"奚","范","彭","郎",
+"鲁","韦","昌","马",
+"苗","凤","花","方",
+"俞","任","袁","柳",
+"酆","鲍","史","唐",
+"费","廉","岑","薛",
+"雷","贺","倪","汤",
+"滕","殷","罗","毕",
+"郝","邬","安","常",
+"乐","于","时","傅",
+"皮","卞","齐","康",
+"伍","余","元","卜",
+"顾","孟","平","黄",
+"和","穆","萧","尹",
+"姚","邵","湛","汪",
+"祁","毛","禹","狄",
+"米","贝","明","臧",
+"计","伏","成","戴",
+"谈","宋","茅","庞",
+"熊","纪","舒","屈",
+"项","祝","董","梁",
+"杜","阮","蓝","闵",
+"席","季","麻","强",
+"贾","路","娄","危",
+"江","童","颜","郭",
+"梅","盛","林","刁",
+"钟","徐","邱","骆",
+"高","夏","蔡","田",
+"樊","胡","凌","霍",
+"虞","万","支","柯",
+"昝","管","卢","莫",
+"经","房","裘","缪",
+"干","解","应","宗",
+"丁","宣","贲","邓",
+"郁","单","杭","洪",
+"包","诸","左","石",
+"崔","吉","钮","龚",
+"程","嵇","邢","滑",
+"裴","陆","荣","翁",
+"荀","羊","於","惠",
+"甄","曲","家","封",
+"芮","羿","储","靳",
+"汲","邴","糜","松",
+"井","段","富","巫",
+"乌","焦","巴","弓",
+"牧","隗","山","谷",
+"车","侯","宓","蓬",
+"全","郗","班","仰",
+"秋","仲","伊","宫",
+"宁","仇","栾","暴",
+"甘","钭","厉","戎",
+"祖","武","符","刘",
+"景","詹","束","龙",
+"叶","幸","司","韶",
+"郜","黎","蓟","薄",
+"印","宿","白","怀",
+"蒲","邰","从","鄂",
+"索","咸","籍","赖",
+"卓","蔺","屠","蒙",
+"池","乔","阴","鬱",
+"胥","能","苍","双",
+"闻","莘","党","翟",
+"谭","贡","劳","逄",
+"姬","申","扶","堵",
+"冉","宰","郦","雍",
+"郤","璩","桑","桂",
+"濮","牛","寿","通",
+"边","扈","燕","冀",
+"郏","浦","尚","农",
+"温","别","庄","晏",
+"柴","瞿","阎","充",
+"慕","连","茹","习",
+"宦","艾","鱼","容",
+"向","古","易","慎",
+"戈","廖","庾","终",
+"暨","居","衡","步",
+"都","耿","满","弘",
+"匡","国","文","寇",
+"广","禄","阙","东",
+"欧","殳","沃","利",
+"蔚","越","夔","隆",
+"师","巩","厍","聂",
+"晁","勾","敖","融",
+"冷","訾","辛","阚",
+"那","简","饶","空",
+"曾","母","沙","乜",
+"养","鞠","须","丰",
+"巢","关","蒯","相",
+"查","后","荆","红",
+"游","竺","权","逯",
+"盖","益","桓","公",
+"兰","原","乞","西","阿","肖","丑","位","曽","巨","德","代","圆","尉","仵","纳","仝","脱","丘","但","展","迪","付","覃","晗","特","隋","苑","奥","漆","谌","郄","练","扎","邝","渠","信","门","陳","化","原","密","泮","鹿","赫",
+"万俟","司马","上官","欧阳",
+"夏侯","诸葛","闻人","东方",
+"赫连","皇甫","尉迟","公羊",
+"澹台","公冶","宗政","濮阳",
+"淳于","单于","太叔","申屠",
+"公孙","仲孙","轩辕","令狐",
+"钟离","宇文","长孙","慕容",
+"鲜于","闾丘","司徒","司空",
+"亓官","司寇","仉督","子车",
+"颛孙","端木","巫马","公西",
+"漆雕","乐正","壤驷","公良",
+"拓跋","夹谷","宰父","榖梁",
+"晋","楚","闫","法","汝","鄢","涂","钦",
+"段干","百里","东郭","南门",
+"呼延","归","海","羊舌","微","生",
+"岳","帅","缑","亢","况","后","有","琴",
+"梁丘","左丘","东门","西门",
+"商","牟","佘","佴","伯","赏","南宫",
+"墨","哈","谯","笪","年","爱","阳","佟",
+"第五","言","福"])
+
+def isit(n):return n.strip() in m
+
rag/svr/task_broker.py CHANGED
@@ -81,11 +81,13 @@ def dispatch():
             tsks = []
             if r["type"] == FileType.PDF.value:
                 pages = HuParser.total_page_number(r["name"], MINIO.get(r["kb_id"], r["location"]))
-                for p in range(0, pages, 10):
-                    task = new_task()
-                    task["from_page"] = p
-                    task["to_page"] = min(p + 10, pages)
-                    tsks.append(task)
+                for s, e in r["parser_config"].get("pages", [(0, 100000)]):
+                    e = min(e, pages)
+                    for p in range(s, e, 10):
+                        task = new_task()
+                        task["from_page"] = p
+                        task["to_page"] = min(p + 10, e)
+                        tsks.append(task)
             else:
                 tsks.append(new_task())
             print(tsks)
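
Note: dispatch() now honours the parser_config "pages" ranges when splitting a PDF into 10-page tasks, instead of always sweeping the whole file. A standalone sketch of the splitting loop with new_task() stubbed out:

def split_tasks(parser_config, pages):
    tsks = []
    for s, e in parser_config.get("pages", [(0, 100000)]):
        e = min(e, pages)
        for p in range(s, e, 10):
            task = {}                     # stand-in for new_task()
            task["from_page"] = p
            task["to_page"] = min(p + 10, e)
            tsks.append(task)
    return tsks

print(split_tasks({"pages": [[0, 15], [30, 42]]}, pages=100))
# [{'from_page': 0, 'to_page': 10}, {'from_page': 10, 'to_page': 15},
#  {'from_page': 30, 'to_page': 40}, {'from_page': 40, 'to_page': 42}]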
rag/svr/task_executor.py CHANGED
@@ -58,7 +58,7 @@ FACTORY = {
 }
 
 
-def set_progress(task_id, from_page, to_page, prog=None, msg="Processing..."):
+def set_progress(task_id, from_page=0, to_page=-1, prog=None, msg="Processing..."):
     cancel = TaskService.do_cancel(task_id)
     if cancel:
         msg += " [Canceled]"
@@ -110,7 +110,7 @@ def collect(comm, mod, tm):
 
 def build(row, cvmdl):
     if row["size"] > DOC_MAXIMUM_SIZE:
-        set_progress(row["id"], -1, "File size exceeds( <= %dMb )" %
+        set_progress(row["id"], prog=-1, msg="File size exceeds( <= %dMb )" %
                      (int(DOC_MAXIMUM_SIZE / 1024 / 1024)))
         return []
 
@@ -119,7 +119,7 @@ def build(row, cvmdl):
     try:
         cron_logger.info("Chunkking {}/{}".format(row["location"], row["name"]))
         cks = chunker.chunk(row["name"], MINIO.get(row["kb_id"], row["location"]), row["from_page"], row["to_page"],
-                            callback)
+                            callback, kb_id=row["kb_id"])
     except Exception as e:
         if re.search("(No such file|not found)", str(e)):
             callback(-1, "Can not find file <%s>" % row["doc_name"])
@@ -144,6 +144,7 @@
         md5.update((ck["content_with_weight"] + str(d["doc_id"])).encode("utf-8"))
         d["_id"] = md5.hexdigest()
         d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
+        d["create_timestamp_flt"] = datetime.datetime.now().timestamp()
         if not d.get("image"):
             docs.append(d)
             continue
@@ -197,15 +198,15 @@ def main(comm, mod):
 
     tmf = open(tm_fnm, "a+")
     for _, r in rows.iterrows():
+        callback = partial(set_progress, r["id"], r["from_page"], r["to_page"])
         try:
             embd_mdl = LLMBundle(r["tenant_id"], LLMType.EMBEDDING)
             cv_mdl = LLMBundle(r["tenant_id"], LLMType.IMAGE2TEXT)
             # TODO: sequence2text model
         except Exception as e:
-            set_progress(r["id"], -1, str(e))
+            callback(prog=-1, msg=str(e))
             continue
 
-        callback = partial(set_progress, r["id"], r["from_page"], r["to_page"])
         st_tm = timer()
         cks = build(r, cv_mdl)
         if not cks:
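
Note: set_progress() gains defaults so it can be partially applied per task; main() now binds the callback before the try-block, letting LLM-bundle failures report progress for the right task, and build() forwards kb_id to the chunker so parsers like resume.py and table.py can update the knowledgebase's field_map. A small illustration of the partial binding (set_progress is stubbed to print):

from functools import partial

def set_progress(task_id, from_page=0, to_page=-1, prog=None, msg="Processing..."):
    print(task_id, from_page, to_page, prog, msg)   # stub: the real one updates the task row

callback = partial(set_progress, "task-1", 0, 10)
callback(prog=-1, msg="LLM bundle init failed")     # -> task-1 0 10 -1 LLM bundle init failed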
rag/utils/es_conn.py CHANGED
@@ -3,13 +3,14 @@ import json
 import time
 import copy
 import elasticsearch
+from elastic_transport import ConnectionTimeout
 from elasticsearch import Elasticsearch
 from elasticsearch_dsl import UpdateByQuery, Search, Index
 from rag.settings import es_logger
 from rag import settings
 from rag.utils import singleton
 
-es_logger.info("Elasticsearch version: "+ str(elasticsearch.__version__))
+es_logger.info("Elasticsearch version: "+str(elasticsearch.__version__))
 
 
 @singleton
@@ -57,7 +58,7 @@ class HuEs:
                         body=d,
                         id=id,
                         doc_type="doc",
-                        refresh=False,
+                        refresh=True,
                         retry_on_conflict=100)
                 else:
                     r = self.es.update(
@@ -65,7 +66,7 @@ class HuEs:
                             self.idxnm if not idxnm else idxnm),
                         body=d,
                         id=id,
-                        refresh=False,
+                        refresh=True,
                         retry_on_conflict=100)
                 es_logger.info("Successfully upsert: %s" % id)
                 T = True
@@ -240,6 +241,18 @@
         es_logger.error("ES search timeout for 3 times!")
         raise Exception("ES search timeout.")
 
+    def sql(self, sql, fetch_size=128, format="json", timeout=2):
+        for i in range(3):
+            try:
+                res = self.es.sql.query(body={"query": sql, "fetch_size": fetch_size}, format=format, request_timeout=timeout)
+                return res
+            except ConnectionTimeout as e:
+                es_logger.error("Timeout【Q】:" + sql)
+                continue
+        es_logger.error("ES search timeout for 3 times!")
+        raise ConnectionTimeout()
+
+
     def get(self, doc_id, idxnm=None):
         for i in range(3):
             try:
@@ -308,7 +321,8 @@ class HuEs:
         try:
             r = self.es.delete_by_query(
                 index=idxnm if idxnm else self.idxnm,
-                body=Search().query(query).to_dict())
+                refresh = True,
+                body=Search().query(query).to_dict())
             return True
         except Exception as e:
             es_logger.error("ES updateByQuery deleteByQuery: " +
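
Note: HuEs.sql() wraps the Elasticsearch SQL endpoint with three retries on ConnectionTimeout; with format="json" the endpoint returns a columns/rows payload, which is the shape Dealer.sql_retrieval() and use_sql() consume. An illustrative response (field names and values are made up):

example_response = {
    "columns": [
        {"name": "doc_id", "type": "keyword"},
        {"name": "docnm_kwd", "type": "keyword"},
        {"name": "age_int", "type": "long"},
    ],
    "rows": [
        ["6f1d0a...", "zhangsan-resume.pdf", 28],
    ],
}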