ramailkk committed
Commit b4cdea6 · 1 Parent(s): 15c009d

new main without upsert

Files changed (1)
  1. main_easy.py +104 -0
main_easy.py ADDED
@@ -0,0 +1,104 @@
import os
from dotenv import load_dotenv
from config_loader import cfg

# Optimized imports - only what we need for Retrieval and Generation
from vector_db import get_index_by_name, load_chunks_from_pinecone  # Using the new helper
from retriever.retriever import HybridRetriever
from retriever.generator import RAGGenerator
from retriever.processor import ChunkProcessor
from retriever.evaluator import RAGEvaluator

# Model Fleet
from models.llama_3_8b import Llama3_8B
from models.mistral_7b import Mistral_7b
from models.qwen_2_5 import Qwen2_5
from models.deepseek_v3 import DeepSeek_V3
from models.tiny_aya import TinyAya

MODEL_MAP = {
    "Llama-3-8B": Llama3_8B,
    "Mistral-7B": Mistral_7b,
    "Qwen-2.5": Qwen2_5,
    "DeepSeek-V3": DeepSeek_V3,
    "TinyAya": TinyAya
}
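# cfg.model_list (used in main) should contain only keys of MODEL_MAP; a minimal
# guard sketch, assuming it is a plain list of model names:
#
#   unknown = set(cfg.model_list) - MODEL_MAP.keys()
#   if unknown:
#       raise KeyError(f"models in config without an implementation: {unknown}")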

load_dotenv()


def main():
    hf_token = os.getenv("HF_TOKEN")
    pinecone_key = os.getenv("PINECONE_API_KEY")
    query = "How do transformers handle long sequences?"

    # 1. Connect to Existing Index (No creation, no uploading)
    # We use the slugified name directly or via config
    index_name = f"{cfg.db['base_index_name']}-{cfg.processing['technique']}"
    index = get_index_by_name(pinecone_key, index_name)
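    # For example, base_index_name "rag-index" with technique "semantic" targets
    # an index named "rag-index-semantic" (illustrative values only; the real
    # names come from the config).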

    # 2. Setup Processor (Required for the Encoder/Embedding model)
    proc = ChunkProcessor(model_name=cfg.processing['embedding_model'])

    # 3. Load BM25 Corpus (The "Source of Truth")
    # This replaces the entire data_loader/chunking block
    # Note: on first run this hits Pinecone; a pickle cache (sketched below)
    # makes reruns near-instant.
    print("🔄 Loading BM25 context from Pinecone metadata...")
    final_chunks = load_chunks_from_pinecone(index)
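    # A minimal pickle-cache sketch for the load above (the cache path is
    # hypothetical; swap this in for the plain call to skip Pinecone on reruns):
    #
    #   import pickle
    #   from pathlib import Path
    #   cache = Path("bm25_chunks.pkl")  # hypothetical cache file
    #   if cache.exists():
    #       final_chunks = pickle.loads(cache.read_bytes())
    #   else:
    #       final_chunks = load_chunks_from_pinecone(index)
    #       cache.write_bytes(pickle.dumps(final_chunks))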

    # 4. Retrieval Setup
    retriever = HybridRetriever(final_chunks, proc.encoder)

    print(f"🔎 Searching via {cfg.retrieval['mode']} mode...")
    context_chunks = retriever.search(
        query, index,
        mode=cfg.retrieval['mode'],
        rerank_strategy=cfg.retrieval['rerank_strategy'],
        use_mmr=cfg.retrieval['use_mmr'],
        top_k=cfg.retrieval['top_k'],
        final_k=cfg.retrieval['final_k']
    )
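    # top_k is presumably the candidate pool size and final_k the number of
    # chunks kept after reranking/MMR; the exact semantics live in
    # HybridRetriever.search.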

    # 5. Initialize the Contestants
    rag_engine = RAGGenerator()
    models = {name: MODEL_MAP[name](token=hf_token) for name in cfg.model_list}

    evaluator = RAGEvaluator(
        judge_model=cfg.gen['judge_model'],
        embedding_model=proc.encoder,
        api_key=os.getenv("GROQ_API_KEY")
    )

    tournament_results = {}

    # 6. Tournament Loop
    for name, model_inst in models.items():
        print(f"\n🏆 Tournament: {name} is generating...")
        try:
            # Generation
            answer = rag_engine.get_answer(
                model_inst, query, context_chunks,
                temperature=cfg.gen['temperature']
            )

            # Faithfulness Evaluation
            faith = evaluator.evaluate_faithfulness(answer, context_chunks)
            # Relevancy Evaluation
            rel = evaluator.evaluate_relevancy(query, answer)

            tournament_results[name] = {
                "Answer": answer[:100] + "...",  # Preview
                "Faithfulness": faith['score'],
                "Relevancy": rel['score']
            }
            print(f"✅ {name} Score - Faith: {faith['score']} | Rel: {rel['score']}")

        except Exception as e:
            print(f"❌ Error evaluating {name}: {e}")

    print("\n--- Final Tournament Standings ---")
    for name, scores in tournament_results.items():
        print(f"{name}: F={scores['Faithfulness']}, R={scores['Relevancy']}")


if __name__ == "__main__":
    main()