ecceembusra committed
Commit c02cbb3 · verified · 1 Parent(s): 3e78cab

Update rag_pipeline.py

Files changed (1)
  1. rag_pipeline.py +84 -298
rag_pipeline.py CHANGED
@@ -1,367 +1,153 @@
  # rag_pipeline.py
- import os, json, re
  from typing import List, Dict, Tuple
  from functools import lru_cache
- from pathlib import Path
-
  import faiss
  import numpy as np
- from huggingface_hub import hf_hub_download
-
  from providers import embed, generate, rerank, qa_extract

  # =========================
- # File paths and constants
  # =========================
-
- # Working root (/home/user/app inside the Space)
- APP_ROOT = Path.cwd()
-
- # Default vector store folder
- VSTORE_DIR = APP_ROOT / "vectorstore"
- FAISS_FILE = "index.faiss"
- META_JSONL = "meta.jsonl"
-
- # Speed / quality settings
- TOP_K_DEFAULT = 4
- FETCH_K_DEFAULT = 16
- HNSW_EFSEARCH = 32
- HIGH_SCORE_THRES = 0.78
- MARGIN_THRES = 0.06
-
- CTX_CHAR_LIMIT = 1400
- QA_SCORE_THRES = 0.25
- QA_PER_PASSAGES = 4
-
- W_TITLE_BOOST = 0.25
- W_LEXICAL = 0.15

  # =========================
- # Rule-based helpers
  # =========================
-
  DATE_RX = re.compile(
      r"\b(\d{1,2}\s+(Ocak|Şubat|Mart|Nisan|Mayıs|Haziran|Temmuz|Ağustos|Eylül|Ekim|Kasım|Aralık)\s+\d{3,4}"
      r"|\d{1,2}\.\d{1,2}\.\d{2,4}"
      r"|\d{4})\b",
      flags=re.IGNORECASE,
  )
- DEATH_KEYS = ["öldü", "vefat", "hayatını kaybet", "ölümü", "ölüm"]
- FOUND_KEYS = ["kuruldu", "kuruluş", "kurulmuştur", "kuruluşu", "kuruluş tarihi"]
-
- def _split_sentences(txt: str) -> List[str]:
-     parts = re.split(r"(?<=[.!?])\s+", (txt or "").strip())
-     return [p.strip() for p in parts if p.strip()]
-
- def _extract_fact_sentence(query: str, hits: List[Dict]) -> Tuple[str, str]:
-     q = (query or "").lower()
-     if "ne zaman" not in q:
-         return "", ""
-     if any(k in q for k in ["öldü", "vefat", "ölümü", "ölüm"]):
-         keylist = DEATH_KEYS
-     elif any(k in q for k in ["kuruldu", "kuruluş"]):
-         keylist = FOUND_KEYS
-     else:
-         keylist = DEATH_KEYS + FOUND_KEYS
-
-     for h in hits:
-         sents = _split_sentences(h.get("text", ""))
-         for s in sents:
-             if any(k in s.lower() for k in keylist) and DATE_RX.search(s):
-                 return s, h.get("source", "")
-     return "", ""
-
- # =========================
- # Name normalization
- # =========================
-
- NAME_RX = re.compile(r"\b([A-ZÇĞİIÖŞÜ][a-zçğıiöşü]+(?:\s+[A-ZÇĞİIÖŞÜ][a-zçğıiöşü]+){0,3})\b")
-
- def _expand_named_span(answer: str, hits: List[Dict]) -> str:
-     ans = (answer or "").strip()
-     if not ans or len(ans.split()) > 2:
-         return ans
-
-     ans_low = ans.lower()
-     preferred_aliases = [
-         "Mustafa Kemal Atatürk",
-         "Sabiha Gökçen",
-         "İsmet İnönü",
-     ]
-     for h in hits:
-         text = h.get("text", "")
-         for alias in preferred_aliases:
-             if alias.lower().find(ans_low) != -1 and alias in text:
-                 return alias
-
-     best = ans
-     for h in hits:
-         for sent in _split_sentences(h.get("text", "")):
-             if ans_low not in sent.lower():
-                 continue
-             for m in NAME_RX.finditer(sent):
-                 cand = m.group(1).strip()
-                 if ans_low in cand.lower() and any(ch.islower() for ch in cand):
-                     if len(cand.split()) >= len(best.split()):
-                         best = cand
-     return best

  # =========================
- # Load the vector store
  # =========================

- def _ensure_local_file(rel_path: str) -> Path:
-     """
-     If ./vectorstore/<file> is missing, download it from the same Space repo.
-     REPO_ID is filled automatically from SPACE_ID (owner/space).
-     """
-     local_path = APP_ROOT / rel_path
-     if local_path.exists():
-         return local_path
-
-     local_path.parent.mkdir(parents=True, exist_ok=True)

-     repo_id = os.environ.get("REPO_ID") or os.environ.get("SPACE_ID")
-     if not repo_id:
-         raise FileNotFoundError(
-             f"'{rel_path}' bulunamadı ve REPO_ID/SPACE_ID çevre değişkeni setli değil."
-         )

      try:
-         # hf_hub_download saves the target file to a temp location; we copy it to local_path
-         downloaded = hf_hub_download(
-             hf_hub_download(
              repo_id=repo_id,
              repo_type="space",
              filename=remote_path,
              local_dir=os.path.dirname(local_path),
              local_dir_use_symlinks=False,
              force_download=True
-             )
-         )
-         return Path(downloaded)
-     except Exception as e:
-         raise FileNotFoundError(
-             f"'{rel_path}' indirilemedi veya bulunamadı. Lütfen bu dosyayı Space deposunda "
-             f"'vectorstore/' klasörüne yükleyin. Ayrıntı: {e}"
          )

- def load_vectorstore(vstore_dir: str | Path = VSTORE_DIR) -> Tuple[faiss.Index, List[Dict]]:
-     """
-     Loads from local disk first; if missing, downloads from the same Space and saves it.
-     Works from app.py without any extra changes.
-     """
-     vstore_dir = Path(vstore_dir)
-     faiss_rel = str(Path("vectorstore") / FAISS_FILE)
-     meta_rel = str(Path("vectorstore") / META_JSONL)

-     # Ensure/fetch the local copies
-     faiss_path = _ensure_local_file(faiss_rel)
-     meta_path = _ensure_local_file(meta_rel)

-     # FAISS index
-     index = faiss.read_index(str(faiss_path))
      try:
          index.hnsw.efSearch = HNSW_EFSEARCH
      except Exception:
          pass

-     # Read the records
-     records: List[Dict] = []
      with open(meta_path, "r", encoding="utf-8") as f:
          for line in f:
              obj = json.loads(line)
-             records.append({
-                 "text": obj.get("text", ""),
-                 "title": (obj.get("metadata") or {}).get("title", ""),
-                 "source": (obj.get("metadata") or {}).get("source", ""),
-             })

      if not records:
          raise RuntimeError("meta.jsonl boş görünüyor.")
      return index, records

  # =========================
- # Keyword extraction + lexical score
- # =========================
-
- _CAP_WORD = re.compile(r"\b([A-ZÇĞİIÖŞÜ][a-zçğıiöşü]+(?:\s+[A-ZÇĞİIÖŞÜ][a-zçğıiöşü]+)*)\b")
-
- def _keywords_from_query(q: str) -> List[str]:
-     q = (q or "").strip()
-     caps = [m.group(1) for m in _CAP_WORD.finditer(q)]
-     nums = re.findall(r"\b\d{3,4}\b", q)
-     base = re.findall(r"[A-Za-zÇĞİIÖŞÜçğıiöşü]+", q)
-     base = [w.lower() for w in base if len(w) > 2]
-     return list(dict.fromkeys(caps + nums + base))
-
- def _lexical_overlap(q_tokens: List[str], text: str) -> float:
-     toks = re.findall(r"[A-Za-zÇĞİIÖŞÜçğıiöşü]+", (text or "").lower())
-     if not toks:
-         return 0.0
-     qset = set([t for t in q_tokens if len(t) > 2])
-     tset = set([t for t in toks if len(t) > 2])
-     inter = len(qset & tset)
-     denom = len(qset) or 1
-     return inter / denom
-
- # =========================
- # Retrieval + (conditional) rerank
  # =========================
-
  @lru_cache(maxsize=256)
  def _cached_query_vec(e5_query: str) -> np.ndarray:
      v = embed([e5_query]).astype("float32")
      return v

- def search_chunks(
-     query: str,
-     index: faiss.Index,
-     records: List[Dict],
-     top_k: int = TOP_K_DEFAULT,
-     fetch_k: int = FETCH_K_DEFAULT,
- ) -> List[Dict]:
-     q = (query or "").strip()
-     q_vec = _cached_query_vec("query: " + q)
      faiss.normalize_L2(q_vec)
-
      scores, idxs = index.search(q_vec, fetch_k)

-     pool: List[Dict] = []
      for i, s in zip(idxs[0], scores[0]):
          if 0 <= i < len(records):
-             r = records[i]
-             pool.append({
-                 "text": r["text"],
-                 "title": r.get("title", ""),
-                 "source": r.get("source", ""),
-                 "score_vec": float(s),
-             })
-     if not pool:
-         return []
-
-     q_tokens = _keywords_from_query(q)
-     q_tokens_lower = [t.lower() for t in q_tokens]
-     for p in pool:
-         title = (p.get("title") or "").lower()
-         title_hit = any(tok.lower() in title for tok in q_tokens if tok and tok[0].isupper())
-         title_boost = W_TITLE_BOOST if title_hit else 0.0
-         lex = _lexical_overlap(q_tokens_lower, p["text"]) * W_LEXICAL
-         p["score_boosted"] = p["score_vec"] + title_boost + lex
-
-     pool.sort(key=lambda x: x["score_boosted"], reverse=True)
-
-     if len(pool) >= 2:
-         top1, top2 = pool[0]["score_boosted"], pool[1]["score_boosted"]
-     else:
-         top1, top2 = pool[0]["score_boosted"], 0.0
-     do_rerank = not (top1 >= HIGH_SCORE_THRES and (top1 - top2) >= MARGIN_THRES)
-
-     if do_rerank:
-         rs = rerank(q, [p["text"] for p in pool])
-         for p, r in zip(pool, rs):
-             p["score_rerank"] = float(r)
-         pool.sort(key=lambda x: (x.get("score_rerank", 0.0), x["score_boosted"]), reverse=True)
-
-     return pool[:top_k]

- # =========================
- # LLM context and source list
- # =========================
-
- def _format_sources(hits: List[Dict]) -> str:
-     seen, urls = set(), []
-     for h in hits:
-         u = (h.get("source") or "").strip()
-         if u and u not in seen:
-             urls.append(u)
-             seen.add(u)
-     return "\n".join(f"- {u}" for u in urls) if urls else "- (yok)"
-
- def _llm_context(hits: List[Dict], limit: int = CTX_CHAR_LIMIT) -> str:
-     ctx, total = [], 0
-     for i, h in enumerate(hits, 1):
-         block = f"[{i}] {h.get('title','')} — {h.get('source','')}\n{h.get('text','')}"
-         if total + len(block) > limit:
-             break
-         ctx.append(block)
-         total += len(block)
-     return "\n\n---\n\n".join(ctx)
-
- # =========================
- # Final answer (rule → QA → LLM → fallback)
- # =========================
-
- def generate_answer(
-     query: str,
-     index: faiss.Index,
-     records: List[Dict],
-     top_k: int = TOP_K_DEFAULT,
- ) -> str:
      hits = search_chunks(query, index, records, top_k=top_k)
      if not hits:
          return "Bilgi bulunamadı."

-     rule_sent, rule_src = _extract_fact_sentence(query, hits)
-     if rule_sent:
-         return f"{rule_sent}\n\nKaynaklar:\n- {rule_src if rule_src else _format_sources(hits)}"
-
-     best = {"answer": None, "score": 0.0, "src": None}
-     for h in hits[:QA_PER_PASSAGES]:
-         try:
-             qa = qa_extract(query, h["text"])
-         except Exception:
-             qa = None
-         if qa and qa.get("answer"):
-             score = float(qa.get("score", 0.0))
-             ans = qa["answer"].strip()
-             if re.search(r"\b(19\d{2}|20\d{2}|Atatürk|Gökçen|Kemal|Ankara|Fenerbahçe)\b", ans, flags=re.IGNORECASE):
-                 score += 0.30
-             if len(ans.split()) <= 2:
-                 ans = _expand_named_span(ans, hits)
-             if score > best["score"]:
-                 best = {"answer": ans, "score": score, "src": h.get("source")}
-
-     if best["answer"] and best["score"] >= QA_SCORE_THRES:
-         final = best["answer"].strip()
-         if any(k in (query or "").lower() for k in ["kimdir", "kim"]):
-             if not final.endswith("."):
-                 final += "."
-             final = f"{final} {query.rstrip('?')} sorusunun yanıtıdır."
-         src_line = f"Kaynaklar:\n- {best['src']}" if best["src"] else "Kaynaklar:\n" + _format_sources(hits)
-         return f"{final}\n\n{src_line}"
-
-     context = _llm_context(hits)
      prompt = (
-         "Aşağıdaki BAĞLAM Wikipedia parçalarından alınmıştır.\n"
-         "Sadece bu bağlamdan yararlanarak soruya kısa, net ve doğru bir Türkçe cevap ver.\n"
-         "Uydurma yapma, sadece metinlerde geçen bilgileri kullan.\n\n"
-         f"Soru:\n{query}\n\nBağlam:\n{context}\n\nYanıtı 1-2 cümlede ver."
      )
      llm_ans = (generate(prompt) or "").strip()
-
-     if (not llm_ans) or ("yapılandırılmadı" in llm_ans.lower()):
-         text = hits[0].get("text", "")
-         first = re.split(r"(?<=[.!?])\s+", text.strip())[:2]
-         llm_ans = " ".join(first).strip() or "Verilen bağlamda bu sorunun cevabı bulunmamaktadır."
-
-     if "Kaynaklar:" not in llm_ans:
-         llm_ans += "\n\nKaynaklar:\n" + _format_sources(hits)
-     return llm_ans
-
- # =========================
- # Quick test
- # =========================
-
- if __name__ == "__main__":
-     idx, recs = load_vectorstore()
-     for q in [
-         "Atatürk ne zaman öldü?",
-         "Türkiye'nin ilk cumhurbaşkanı kimdir?",
-         "Fenerbahçe ne zaman kuruldu?",
-         "Türkiye'nin başkenti neresidir?",
-         "Türkiye'nin ilk kadın pilotu kimdir?",
-     ]:
-         print("Soru:", q)
-         print(generate_answer(q, idx, recs, top_k=TOP_K_DEFAULT))
-         print("-" * 80)
 
  # rag_pipeline.py
+ import os, json, re, shutil, requests
  from typing import List, Dict, Tuple
  from functools import lru_cache
  import faiss
  import numpy as np
+ from huggingface_hub import hf_hub_download, hf_hub_url
  from providers import embed, generate, rerank, qa_extract

  # =========================
+ # General settings
  # =========================
+ HNSW_EFSEARCH = 32
+ TOP_K_DEFAULT = 4
+ FETCH_K_DEFAULT = 16
+ CTX_CHAR_LIMIT = 1400
+ QA_SCORE_THRES = 0.25
+ QA_PER_PASSAGES = 4
+ HIGH_SCORE_THRES = 0.78
+ MARGIN_THRES = 0.06
+ W_TITLE_BOOST = 0.25
+ W_LEXICAL = 0.15

  # =========================
+ # Helper regexes
  # =========================
  DATE_RX = re.compile(
      r"\b(\d{1,2}\s+(Ocak|Şubat|Mart|Nisan|Mayıs|Haziran|Temmuz|Ağustos|Eylül|Ekim|Kasım|Aralık)\s+\d{3,4}"
      r"|\d{1,2}\.\d{1,2}\.\d{2,4}"
      r"|\d{4})\b",
      flags=re.IGNORECASE,
  )
+ DEATH_KEYS = ["öldü", "vefat", "ölümü", "ölüm"]
+ FOUND_KEYS = ["kuruldu", "kuruluş", "kurulmuştur"]

  # =========================
+ # Helper functions
  # =========================
+ def _split_sentences(txt: str) -> List[str]:
+     return [p.strip() for p in re.split(r"(?<=[.!?])\s+", (txt or "").strip()) if p.strip()]

+ def _try_local_paths(vstore_dir: str) -> Tuple[str, str]:
+     faiss_file = os.path.join(vstore_dir, "index.faiss")
+     meta_file = os.path.join(vstore_dir, "meta.jsonl")
+     if os.path.exists(faiss_file) and os.path.exists(meta_file):
+         return faiss_file, meta_file
+     return "", ""

+ def _download_from_hub(repo_id: str, remote_path: str, local_path: str) -> bool:
+     """Download the FAISS or meta file from the Hugging Face Space repo."""
+     os.makedirs(os.path.dirname(local_path), exist_ok=True)

+     # 1) try hf_hub_download
      try:
+         tmp = hf_hub_download(
              repo_id=repo_id,
              repo_type="space",
              filename=remote_path,
              local_dir=os.path.dirname(local_path),
              local_dir_use_symlinks=False,
              force_download=True
          )
+         if os.path.abspath(tmp) != os.path.abspath(local_path):
+             shutil.copyfile(tmp, local_path)
+         return os.path.exists(local_path)
+     except Exception:
+         pass
+
+     # 2) fallback: direct URL download
+     try:
+         url = hf_hub_url(repo_id=repo_id, filename=remote_path, repo_type="space")
+         with requests.get(url, stream=True, timeout=60) as r:
+             r.raise_for_status()
+             with open(local_path, "wb") as f:
+                 for chunk in r.iter_content(1024 * 1024):
+                     if chunk:
+                         f.write(chunk)
+         return os.path.exists(local_path) and os.path.getsize(local_path) > 0
+     except Exception:
+         return False
+
+ # =========================
+ # Vector store loading
+ # =========================
+ def load_vectorstore(vstore_dir: str = "vectorstore") -> Tuple[faiss.Index, List[Dict]]:
+     os.makedirs(vstore_dir, exist_ok=True)

+     faiss_path, meta_path = _try_local_paths(vstore_dir)
+     if not (faiss_path and meta_path):
+         repo_id = os.getenv("HF_SPACE_REPO_ID") or "ecceembusra/turkish-wikipedia-rag"
+         faiss_path = os.path.join(vstore_dir, "index.faiss")
+         meta_path = os.path.join(vstore_dir, "meta.jsonl")

+         ok1 = _download_from_hub(repo_id, "vectorstore/index.faiss", faiss_path)
+         ok2 = _download_from_hub(repo_id, "vectorstore/meta.jsonl", meta_path)

+         if not (ok1 and ok2):
+             raise FileNotFoundError(
+                 "'vectorstore/index.faiss' indirilemedi veya bulunamadı.\n"
+                 "Lütfen bu dosyaları Space deposunda 'vectorstore/' klasörüne yükleyin "
+                 "veya Settings → Variables & secrets altında HF_SPACE_REPO_ID değişkenini ekleyin."
+             )
+
+     index = faiss.read_index(faiss_path)
      try:
          index.hnsw.efSearch = HNSW_EFSEARCH
      except Exception:
          pass

+     records = []
      with open(meta_path, "r", encoding="utf-8") as f:
          for line in f:
              obj = json.loads(line)
+             records.append({"text": obj.get("text", ""), "metadata": obj.get("metadata", {})})

      if not records:
          raise RuntimeError("meta.jsonl boş görünüyor.")
      return index, records

  # =========================
+ # Search and answering
  # =========================
  @lru_cache(maxsize=256)
  def _cached_query_vec(e5_query: str) -> np.ndarray:
      v = embed([e5_query]).astype("float32")
      return v

+ def search_chunks(query: str, index: faiss.Index, records: List[Dict], top_k: int = TOP_K_DEFAULT, fetch_k: int = FETCH_K_DEFAULT) -> List[Dict]:
+     q_vec = _cached_query_vec("query: " + query)
      faiss.normalize_L2(q_vec)
      scores, idxs = index.search(q_vec, fetch_k)

+     pool = []
      for i, s in zip(idxs[0], scores[0]):
          if 0 <= i < len(records):
+             md = records[i]["metadata"]
+             pool.append({"text": records[i]["text"], "title": md.get("title", ""), "source": md.get("source", ""), "score_vec": float(s)})

+     return sorted(pool, key=lambda x: x["score_vec"], reverse=True)[:top_k]

+ def generate_answer(query: str, index: faiss.Index, records: List[Dict], top_k: int = TOP_K_DEFAULT) -> str:
      hits = search_chunks(query, index, records, top_k=top_k)
      if not hits:
          return "Bilgi bulunamadı."

+     context = "\n\n".join(h["text"] for h in hits)
      prompt = (
+         "Aşağıdaki Wikipedia parçalarına dayanarak soruyu yanıtla. "
+         "Uydurma bilgi verme, sadece metindeki veriyi kullan.\n\n"
+         f"Soru: {query}\n\nBağlam:\n{context}\n\nYanıt:"
      )
      llm_ans = (generate(prompt) or "").strip()
+     return llm_ans or "Verilen bağlamda yanıt bulunamadı."
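
The commit drops the old quick-test block, so a minimal usage sketch of the new module follows for reference. It assumes providers.py exposes working embed/generate backends and that vectorstore/index.faiss and vectorstore/meta.jsonl are either present locally or downloadable from the Space repo; the file name usage_sketch.py is hypothetical, and the sample questions are taken from the removed __main__ block.

# usage_sketch.py (hypothetical) — smoke test for the slimmed-down rag_pipeline.py
from rag_pipeline import load_vectorstore, generate_answer, TOP_K_DEFAULT

# Loads the local FAISS index and metadata, or downloads them from the Space repo if missing
index, records = load_vectorstore("vectorstore")

for q in ["Atatürk ne zaman öldü?", "Fenerbahçe ne zaman kuruldu?"]:
    print("Soru:", q)
    print(generate_answer(q, index, records, top_k=TOP_K_DEFAULT))
    print("-" * 80)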