Kevin Hu committed on
Commit 9c8f077
1 Parent(s): b4e6025

Fix raptor issue (#3737)


### What problem does this PR solve?

#3732

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)

Files changed (2)
  1. rag/raptor.py +17 -13
  2. rag/svr/task_executor.py +1 -1
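For context on the first file: `_get_optimal_clusters`, whose signature is only reformatted below, picks the per-layer cluster count by minimizing the Bayesian Information Criterion over candidate counts (the `bics` list and `n_clusters[np.argmin(bics)]` survive in the hunk). A sketch of that selection pattern, assuming scikit-learn's `GaussianMixture`; the mixture-fitting lines themselves fall outside the hunk:

```python
import numpy as np
from sklearn.mixture import GaussianMixture

def get_optimal_clusters(embeddings: np.ndarray, max_cluster: int, random_state: int) -> int:
    # Fit a Gaussian mixture for each candidate cluster count and keep the
    # count whose fit has the lowest BIC (best likelihood/complexity trade-off).
    max_clusters = min(max_cluster, len(embeddings))
    n_clusters = np.arange(1, max_clusters)
    bics = []
    for n in n_clusters:
        gm = GaussianMixture(n_components=int(n), random_state=random_state)
        gm.fit(embeddings)
        bics.append(gm.bic(embeddings))
    return int(n_clusters[np.argmin(bics)])
```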
rag/raptor.py CHANGED

```diff
@@ -33,7 +33,7 @@ class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
         self._prompt = prompt
         self._max_token = max_token
 
-    def _get_optimal_clusters(self, embeddings: np.ndarray, random_state:int):
+    def _get_optimal_clusters(self, embeddings: np.ndarray, random_state: int):
         max_clusters = min(self._max_cluster, len(embeddings))
         n_clusters = np.arange(1, max_clusters)
         bics = []
@@ -44,7 +44,7 @@ class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
         optimal_clusters = n_clusters[np.argmin(bics)]
         return optimal_clusters
 
-    def __call__(self, chunks: tuple[str, np.ndarray], random_state, callback=None):
+    def __call__(self, chunks, random_state, callback=None):
         layers = [(0, len(chunks))]
         start, end = 0, len(chunks)
         if len(chunks) <= 1: return
@@ -54,13 +54,15 @@ class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
             nonlocal chunks
             try:
                 texts = [chunks[i][0] for i in ck_idx]
-                len_per_chunk = int((self._llm_model.max_length - self._max_token)/len(texts))
+                len_per_chunk = int((self._llm_model.max_length - self._max_token) / len(texts))
                 cluster_content = "\n".join([truncate(t, max(1, len_per_chunk)) for t in texts])
                 cnt = self._llm_model.chat("You're a helpful assistant.",
-                                           [{"role": "user", "content": self._prompt.format(cluster_content=cluster_content)}],
-                                           {"temperature": 0.3, "max_tokens": self._max_token}
-                                           )
-                cnt = re.sub("(······\n由于长度的原因，回答被截断了，要继续吗？|For the content length reason, it stopped, continue?)", "", cnt)
+                                           [{"role": "user",
+                                             "content": self._prompt.format(cluster_content=cluster_content)}],
+                                           {"temperature": 0.3, "max_tokens": self._max_token}
+                                           )
+                cnt = re.sub("(······\n由于长度的原因，回答被截断了，要继续吗？|For the content length reason, it stopped, continue?)", "",
+                             cnt)
                 logging.debug(f"SUM: {cnt}")
                 embds, _ = self._embd_model.encode([cnt])
                 with lock:
@@ -74,10 +76,10 @@ class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
         while end - start > 1:
             embeddings = [embd for _, embd in chunks[start: end]]
             if len(embeddings) == 2:
-                summarize([start, start+1], Lock())
+                summarize([start, start + 1], Lock())
                 if callback:
-                    callback(msg="Cluster one layer: {} -> {}".format(end-start, len(chunks)-end))
-                labels.extend([0,0])
+                    callback(msg="Cluster one layer: {} -> {}".format(end - start, len(chunks) - end))
+                labels.extend([0, 0])
                 layers.append((end, len(chunks)))
                 start = end
                 end = len(chunks)
@@ -85,7 +87,7 @@ class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
 
             n_neighbors = int((len(embeddings) - 1) ** 0.8)
             reduced_embeddings = umap.UMAP(
-                n_neighbors=max(2, n_neighbors), n_components=min(12, len(embeddings)-2), metric="cosine"
+                n_neighbors=max(2, n_neighbors), n_components=min(12, len(embeddings) - 2), metric="cosine"
             ).fit_transform(embeddings)
             n_clusters = self._get_optimal_clusters(reduced_embeddings, random_state)
             if n_clusters == 1:
@@ -100,7 +102,7 @@ class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
             with ThreadPoolExecutor(max_workers=12) as executor:
                 threads = []
                 for c in range(n_clusters):
-                    ck_idx = [i+start for i in range(len(lbls)) if lbls[i] == c]
+                    ck_idx = [i + start for i in range(len(lbls)) if lbls[i] == c]
                     threads.append(executor.submit(summarize, ck_idx, lock))
                 wait(threads, return_when=ALL_COMPLETED)
                 logging.debug(str([t.result() for t in threads]))
@@ -109,7 +111,9 @@ class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
             labels.extend(lbls)
             layers.append((end, len(chunks)))
             if callback:
-                callback(msg="Cluster one layer: {} -> {}".format(end-start, len(chunks)-end))
+                callback(msg="Cluster one layer: {} -> {}".format(end - start, len(chunks) - end))
             start = end
             end = len(chunks)
 
+        return chunks
+
```
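The substance of the fix is the new `return chunks` at the end of `__call__`, paired with the caller rebinding in the next file. The `nonlocal chunks` declaration in `summarize` implies the method reassigns `chunks` somewhere, and in Python a reassigned parameter points at a new local object the caller never sees, so summaries appended after that point were invisible to `run_raptor`. A minimal sketch of the pitfall, with illustrative names (not ragflow code):

```python
def summarize_in_place(chunks):
    chunks.append(("summary", [0.1]))      # mutates the caller's list directly

def summarize_with_rebinding(chunks):
    chunks = [c for c in chunks if c[1]]   # rebinding: a NEW local list
    chunks.append(("summary", [0.1]))      # ...so this append stays private
    return chunks                          # the caller must capture this

data = [("a", [1.0]), ("b", [])]
summarize_in_place(data)
assert data[-1][0] == "summary"            # in-place mutation propagates

data = [("a", [1.0]), ("b", [])]
summarize_with_rebinding(data)             # result discarded: summary lost
assert len(data) == 2

data = summarize_with_rebinding(data)      # the pattern this commit adopts
assert data[-1][0] == "summary"
```

The signature change also drops the `chunks: tuple[str, np.ndarray]` annotation, which did not describe the argument actually passed (a sequence of `(text, embedding)` pairs).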
rag/svr/task_executor.py CHANGED

```diff
@@ -344,7 +344,7 @@ def run_raptor(row, chat_mdl, embd_mdl, callback=None):
         row["parser_config"]["raptor"]["threshold"]
     )
     original_length = len(chunks)
-    raptor(chunks, row["parser_config"]["raptor"]["random_seed"], callback)
+    chunks = raptor(chunks, row["parser_config"]["raptor"]["random_seed"], callback)
     doc = {
         "doc_id": row["doc_id"],
         "kb_id": [str(row["kb_id"])],
```