ecceembusra committed
Commit b3f63ab · verified · 1 Parent(s): 094ce0e

Update rag_pipeline.py

Files changed (1):
  1. rag_pipeline.py  +116 -97
rag_pipeline.py CHANGED
@@ -6,24 +6,23 @@ from pathlib import Path
 
 import faiss
 import numpy as np
+from huggingface_hub import hf_hub_download
 
 from providers import embed, generate, rerank, qa_extract
 
-try:
-    from huggingface_hub import hf_hub_download
-except Exception:
-    hf_hub_download = None  # fine if it is missing from requirements
-
 # =========================
-# Default paths
+# File paths and constants
 # =========================
-VSTORE_DIR = "vectorstore"
-FAISS_FILE = "index.faiss"
-META_JSONL = "meta.jsonl"
 
-# =========================
+# Working root (/home/user/app inside a Space)
+APP_ROOT = Path.cwd()
+
+# Default vector store folder
+VSTORE_DIR = APP_ROOT / "vectorstore"
+FAISS_FILE = "index.faiss"
+META_JSONL = "meta.jsonl"
+
 # Speed / quality settings
-# =========================
 TOP_K_DEFAULT = 4
 FETCH_K_DEFAULT = 16
 HNSW_EFSEARCH = 32
@@ -40,11 +39,12 @@ W_LEXICAL = 0.15
 # =========================
 # Rule-based helpers
 # =========================
+
 DATE_RX = re.compile(
     r"\b(\d{1,2}\s+(Ocak|Şubat|Mart|Nisan|Mayıs|Haziran|Temmuz|Ağustos|Eylül|Ekim|Kasım|Aralık)\s+\d{3,4}"
     r"|\d{1,2}\.\d{1,2}\.\d{2,4}"
     r"|\d{4})\b",
-    flags=re.IGNORECASE
+    flags=re.IGNORECASE,
 )
 DEATH_KEYS = ["öldü", "vefat", "hayatını kaybet", "ölümü", "ölüm"]
 FOUND_KEYS = ["kuruldu", "kuruluş", "kurulmuştur", "kuruluşu", "kuruluş tarihi"]
@@ -63,6 +63,7 @@ def _extract_fact_sentence(query: str, hits: List[Dict]) -> Tuple[str, str]:
         keylist = FOUND_KEYS
     else:
         keylist = DEATH_KEYS + FOUND_KEYS
+
     for h in hits:
         sents = _split_sentences(h.get("text", ""))
         for s in sents:
@@ -70,19 +71,29 @@ def _extract_fact_sentence(query: str, hits: List[Dict]) -> Tuple[str, str]:
                 return s, h.get("source", "")
     return "", ""
 
+# =========================
+# Name normalization
+# =========================
+
 NAME_RX = re.compile(r"\b([A-ZÇĞİIÖŞÜ][a-zçğıiöşü]+(?:\s+[A-ZÇĞİIÖŞÜ][a-zçğıiöşü]+){0,3})\b")
 
 def _expand_named_span(answer: str, hits: List[Dict]) -> str:
     ans = (answer or "").strip()
     if not ans or len(ans.split()) > 2:
         return ans
+
     ans_low = ans.lower()
-    preferred_aliases = ["Mustafa Kemal Atatürk", "Sabiha Gökçen", "İsmet İnönü"]
+    preferred_aliases = [
+        "Mustafa Kemal Atatürk",
+        "Sabiha Gökçen",
+        "İsmet İnönü",
+    ]
     for h in hits:
         text = h.get("text", "")
         for alias in preferred_aliases:
-            if ans_low in alias.lower() and alias in text:
+            if alias.lower().find(ans_low) != -1 and alias in text:
                 return alias
+
     best = ans
     for h in hits:
         for sent in _split_sentences(h.get("text", "")):
@@ -90,99 +101,86 @@ def _expand_named_span(answer: str, hits: List[Dict]) -> str:
                 continue
             for m in NAME_RX.finditer(sent):
                 cand = m.group(1).strip()
-                if ans_low in cand.lower():
-                    if len(cand) >= len(best) and any(ch.islower() for ch in cand):
-                        best = cand if len(cand.split()) >= len(best.split()) else best
+                if ans_low in cand.lower() and any(ch.islower() for ch in cand):
+                    if len(cand.split()) >= len(best.split()):
+                        best = cand
     return best
 
-# -------------------------
-# Hugging Face Hub fallback
-# -------------------------
-def _hub_download_if_possible(rel_path: str) -> str:
-    if hf_hub_download is None:
-        return ""
-    repo_id = os.getenv("HF_SPACE_ID") or os.getenv("SPACE_ID")
-    if not repo_id:
-        return ""
-    try:
-        return hf_hub_download(repo_id=repo_id, filename=rel_path, repo_type="space")
-    except Exception:
-        return ""
-
 # =========================
-# **CRITICAL**: load the vector store
+# Load the vector store
 # =========================
-def load_vectorstore(vstore_dir: str = "vectorstore"):
+
+def _ensure_local_file(rel_path: str) -> Path:
     """
-    1) Look for the files in the following candidate folders, in order:
-       - the given vstore_dir
-       - working directory / vstore_dir
-       - one level above this file (repo root) / vstore_dir
-       - /home/user / vstore_dir (typical HF Spaces root)
-    2) If not found locally, download from the same Space repo (repo_type='space').
+    If ./vectorstore/<file> does not exist, download it from the same Space repo.
+    REPO_ID is filled in automatically from SPACE_ID (owner/space).
     """
-    # Candidate directories
-    candidates = []
-    try:
-        this_dir = Path(__file__).resolve().parent
-        repo_root = this_dir.parent
-    except Exception:
-        this_dir = Path.cwd()
-        repo_root = Path.cwd()
-
-    candidates += [
-        Path(vstore_dir),
-        Path.cwd() / vstore_dir,
-        repo_root / vstore_dir,
-        Path("/home/user") / vstore_dir,      # typical HF Spaces root
-        Path("/home/user/app") / vstore_dir,  # some runners
-    ]
+    local_path = APP_ROOT / rel_path
+    if local_path.exists():
+        return local_path
 
-    faiss_path = ""
-    meta_path = ""
-    for base in candidates:
-        f = base / FAISS_FILE
-        m = base / META_JSONL
-        if f.exists() and m.exists():
-            faiss_path, meta_path = str(f), str(m)
-            break
+    local_path.parent.mkdir(parents=True, exist_ok=True)
 
-    # Hub fallback
-    if not (faiss_path and meta_path):
-        hub_f = _hub_download_if_possible(f"{VSTORE_DIR}/{FAISS_FILE}")
-        hub_m = _hub_download_if_possible(f"{VSTORE_DIR}/{META_JSONL}")
-        if hub_f and hub_m and os.path.exists(hub_f) and os.path.exists(hub_m):
-            faiss_path, meta_path = hub_f, hub_m
+    repo_id = os.environ.get("REPO_ID") or os.environ.get("SPACE_ID")
+    if not repo_id:
+        raise FileNotFoundError(
+            f"'{rel_path}' bulunamadı ve REPO_ID/SPACE_ID çevre değişkeni setli değil."
+        )
 
-    if not (faiss_path and meta_path):
+    try:
+        # with local_dir set, hf_hub_download places a real copy under APP_ROOT/<rel_path>
+        downloaded = hf_hub_download(
+            repo_id=repo_id,
+            filename=rel_path,  # e.g. 'vectorstore/index.faiss'
+            local_dir=str(APP_ROOT),
+            local_dir_use_symlinks=False,
+        )
+        return Path(downloaded)
+    except Exception as e:
         raise FileNotFoundError(
-            "'vectorstore/index.faiss' indirilemedi veya bulunamadı. "
-            "Lütfen bu dosyayı Space deposunda 'vectorstore/' klasörüne yükleyin."
+            f"'{rel_path}' indirilemedi veya bulunamadı. Lütfen bu dosyayı Space deposunda "
+            f"'vectorstore/' klasörüne yükleyin. Ayrıntı: {e}"
         )
 
-    index = faiss.read_index(faiss_path)
+def load_vectorstore(vstore_dir: str | Path = VSTORE_DIR) -> Tuple[faiss.Index, List[Dict]]:
+    """
+    Loads from the local copy first; if it is missing, downloads it from the same Space and saves it.
+    Works from app.py without any further changes.
+    """
+    vstore_dir = Path(vstore_dir)
+    faiss_rel = str(Path("vectorstore") / FAISS_FILE)
+    meta_rel = str(Path("vectorstore") / META_JSONL)
+
+    # Make sure both files exist locally (download if needed)
+    faiss_path = _ensure_local_file(faiss_rel)
+    meta_path = _ensure_local_file(meta_rel)
+
+    # FAISS index
+    index = faiss.read_index(str(faiss_path))
     try:
         index.hnsw.efSearch = HNSW_EFSEARCH
     except Exception:
         pass
 
+    # Read the records
     records: List[Dict] = []
    with open(meta_path, "r", encoding="utf-8") as f:
        for line in f:
            obj = json.loads(line)
-            md = obj.get("metadata", {}) or {}
            records.append({
                "text": obj.get("text", ""),
-                "title": md.get("title", ""),
-                "source": md.get("source", ""),
+                "title": (obj.get("metadata") or {}).get("title", ""),
+                "source": (obj.get("metadata") or {}).get("source", ""),
            })
+
    if not records:
        raise RuntimeError("meta.jsonl boş görünüyor.")
    return index, records
 
 # =========================
-# Search + re-ranking
+# Keyword extraction + lexical score
 # =========================
+
 _CAP_WORD = re.compile(r"\b([A-ZÇĞİIÖŞÜ][a-zçğıiöşü]+(?:\s+[A-ZÇĞİIÖŞÜ][a-zçğıiöşü]+)*)\b")
 
 def _keywords_from_query(q: str) -> List[str]:
@@ -203,27 +201,36 @@ def _lexical_overlap(q_tokens: List[str], text: str) -> float:
     denom = len(qset) or 1
     return inter / denom
 
+# =========================
+# Retrieval + (conditional) rerank
+# =========================
+
 @lru_cache(maxsize=256)
 def _cached_query_vec(e5_query: str) -> np.ndarray:
     v = embed([e5_query]).astype("float32")
     return v
 
-def search_chunks(query: str, index: faiss.Index, records: List[Dict],
-                  top_k: int = TOP_K_DEFAULT, fetch_k: int = FETCH_K_DEFAULT) -> List[Dict]:
+def search_chunks(
+    query: str,
+    index: faiss.Index,
+    records: List[Dict],
+    top_k: int = TOP_K_DEFAULT,
+    fetch_k: int = FETCH_K_DEFAULT,
+) -> List[Dict]:
     q = (query or "").strip()
-    q_e5 = "query: " + q
-    q_vec = _cached_query_vec(q_e5)
+    q_vec = _cached_query_vec("query: " + q)
     faiss.normalize_L2(q_vec)
+
     scores, idxs = index.search(q_vec, fetch_k)
 
     pool: List[Dict] = []
     for i, s in zip(idxs[0], scores[0]):
         if 0 <= i < len(records):
-            md = records[i]
+            r = records[i]
             pool.append({
-                "text": md["text"],
-                "title": md.get("title", ""),
-                "source": md.get("source", ""),
+                "text": r["text"],
+                "title": r.get("title", ""),
+                "source": r.get("source", ""),
                 "score_vec": float(s),
             })
     if not pool:
@@ -238,31 +245,33 @@ def search_chunks(query: str, index: faiss.Index, records: List[Dict],
         lex = _lexical_overlap(q_tokens_lower, p["text"]) * W_LEXICAL
         p["score_boosted"] = p["score_vec"] + title_boost + lex
 
-    pool_by_boost = sorted(pool, key=lambda x: x["score_boosted"], reverse=True)
+    pool.sort(key=lambda x: x["score_boosted"], reverse=True)
 
-    if len(pool_by_boost) >= 2:
-        top1, top2 = pool_by_boost[0]["score_boosted"], pool_by_boost[1]["score_boosted"]
+    if len(pool) >= 2:
+        top1, top2 = pool[0]["score_boosted"], pool[1]["score_boosted"]
     else:
-        top1, top2 = pool_by_boost[0]["score_boosted"], 0.0
+        top1, top2 = pool[0]["score_boosted"], 0.0
     do_rerank = not (top1 >= HIGH_SCORE_THRES and (top1 - top2) >= MARGIN_THRES)
 
     if do_rerank:
-        rs = rerank(q, [p["text"] for p in pool_by_boost])
-        for p, r in zip(pool_by_boost, rs):
+        rs = rerank(q, [p["text"] for p in pool])
+        for p, r in zip(pool, rs):
             p["score_rerank"] = float(r)
-        pool_by_boost.sort(key=lambda x: (x.get("score_rerank", 0.0), x["score_boosted"]), reverse=True)
+        pool.sort(key=lambda x: (x.get("score_rerank", 0.0), x["score_boosted"]), reverse=True)
 
-    return pool_by_boost[:top_k]
+    return pool[:top_k]
 
 # =========================
 # LLM context and source list
 # =========================
+
 def _format_sources(hits: List[Dict]) -> str:
     seen, urls = set(), []
     for h in hits:
         u = (h.get("source") or "").strip()
         if u and u not in seen:
-            urls.append(u); seen.add(u)
+            urls.append(u)
+            seen.add(u)
     return "\n".join(f"- {u}" for u in urls) if urls else "- (yok)"
 
 def _llm_context(hits: List[Dict], limit: int = CTX_CHAR_LIMIT) -> str:
@@ -271,13 +280,20 @@ def _llm_context(hits: List[Dict], limit: int = CTX_CHAR_LIMIT) -> str:
         block = f"[{i}] {h.get('title','')} — {h.get('source','')}\n{h.get('text','')}"
         if total + len(block) > limit:
             break
-        ctx.append(block); total += len(block)
+        ctx.append(block)
+        total += len(block)
     return "\n\n---\n\n".join(ctx)
 
 # =========================
-# Final answer
+# Final answer (rule → QA → LLM → fallback)
 # =========================
-def generate_answer(query: str, index: faiss.Index, records: List[Dict], top_k: int = TOP_K_DEFAULT) -> str:
+
+def generate_answer(
+    query: str,
+    index: faiss.Index,
+    records: List[Dict],
+    top_k: int = TOP_K_DEFAULT,
+) -> str:
     hits = search_chunks(query, index, records, top_k=top_k)
     if not hits:
         return "Bilgi bulunamadı."
@@ -319,10 +335,12 @@ def generate_answer(query: str, index: faiss.Index, records: List[Dict], top_k:
         f"Soru:\n{query}\n\nBağlam:\n{context}\n\nYanıtı 1-2 cümlede ver."
     )
     llm_ans = (generate(prompt) or "").strip()
+
     if (not llm_ans) or ("yapılandırılmadı" in llm_ans.lower()):
         text = hits[0].get("text", "")
         first = re.split(r"(?<=[.!?])\s+", text.strip())[:2]
         llm_ans = " ".join(first).strip() or "Verilen bağlamda bu sorunun cevabı bulunmamaktadır."
+
     if "Kaynaklar:" not in llm_ans:
         llm_ans += "\n\nKaynaklar:\n" + _format_sources(hits)
     return llm_ans
@@ -330,6 +348,7 @@ def generate_answer(query: str, index: faiss.Index, records: List[Dict], top_k:
 # =========================
 # Quick test
 # =========================
+
 if __name__ == "__main__":
     idx, recs = load_vectorstore()
     for q in [
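
A minimal usage sketch (not part of the commit): assuming rag_pipeline.py sits next to the Space's app.py and the runtime exposes SPACE_ID (or a REPO_ID variable) so the download fallback in _ensure_local_file can locate the repo, the updated entry points might be called like this; the query string is only an example. Note that the removed helper passed repo_type="space" to hf_hub_download, while the new call relies on the default repo type, so if the vectorstore files live only in the Space repo the download step may still need that argument.

# usage_sketch.py: hypothetical caller, not from the commit
from rag_pipeline import load_vectorstore, generate_answer

# Loads ./vectorstore/index.faiss and meta.jsonl, downloading them from the
# Hub on first run if they are not present locally.
index, records = load_vectorstore()

# Retrieval + optional rerank + LLM answer, with a "Kaynaklar:" source list appended.
print(generate_answer("Türkiye Cumhuriyeti ne zaman kuruldu?", index, records, top_k=4))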