"""
Advanced RAG techniques for improved retrieval and generation (2025 best practices)
Includes: LLM-based query expansion, Cross-Encoder reranking, contextual compression, multi-query retrieval
"""

from typing import List, Dict, Optional, Tuple
from dataclasses import dataclass
import re
from sentence_transformers import CrossEncoder


@dataclass
class RetrievedDocument:
    """Document retrieved from vector database"""
    id: str
    text: str
    confidence: float
    metadata: Dict


class AdvancedRAG:
    """Advanced RAG system with 2025 best practices"""

    def __init__(self, embedding_service, qdrant_service):
        self.embedding_service = embedding_service
        self.qdrant_service = qdrant_service
        
        # Initialize Cross-Encoder for reranking (multilingual for Vietnamese support)
        print("Loading Cross-Encoder model for reranking...")
        # Use multilingual model instead of English-only ms-marco
        self.cross_encoder = CrossEncoder('cross-encoder/mmarco-mMiniLMv2-L12-H384-v1')
        print("✓ Cross-Encoder loaded (multilingual)")

    def expand_query_llm(
        self,
        query: str,
        hf_client=None
    ) -> List[str]:
        """
        Expand query using an LLM (2025 best practice)
        Generates query variations, sub-queries, and hypothetical answers
        
        Args:
            query: Original user query
            hf_client: HuggingFace InferenceClient (optional)
            
        Returns:
            List of expanded queries
        """
        queries = [query]
        
        # Fallback to rule-based if no LLM client
        if not hf_client:
            return self._expand_query_rule_based(query)
        
        try:
            # LLM-based expansion prompt
            expansion_prompt = f"""Given this user question, generate 2-3 alternative phrasings or sub-questions that would help retrieve relevant information.

User Question: {query}

Alternative queries (one per line):"""
            
            # Generate expansions
            response = ""
            for msg in hf_client.chat_completion(
                messages=[{"role": "user", "content": expansion_prompt}],
                max_tokens=256,
                stream=True,
                temperature=0.7,
                model="openai/gpt-oss-20b"
            ):
                if msg.choices and msg.choices[0].delta.content:
                    response += msg.choices[0].delta.content
            
            # Parse expansions
            lines = [line.strip() for line in response.split('\n') if line.strip()]
            # Filter out numbered lists, dashes, etc.
            clean_lines = []
            for line in lines:
                # Remove common list markers
                cleaned = re.sub(r'^(?:[-*•]|\d+[\.\)])\s*', '', line)
                if cleaned and len(cleaned) > 5:
                    clean_lines.append(cleaned)
            
            queries.extend(clean_lines[:3])  # Add top 3 expansions
            
        except Exception as e:
            print(f"LLM expansion failed, using rule-based: {e}")
            return self._expand_query_rule_based(query)
        
        return queries[:4]  # Original + 3 expansions

    def _expand_query_rule_based(self, query: str) -> List[str]:
        """
        Fallback rule-based query expansion
        Simple but effective Vietnamese-aware expansion
        """
        queries = [query]

        # Vietnamese question words
        question_words = ['ai', 'gì', 'nào', 'đâu', 'khi nào', 'như thế nào',
                         'sao', 'tại sao', 'có', 'là', 'được', 'không', 'làm sao']

        query_lower = query.lower()
        for qw in question_words:
            if qw in query_lower:
                variant = query_lower.replace(qw, '').strip()
                if variant and variant != query_lower:
                    queries.append(variant)
                    break  # One variation is enough

        # Extract key phrases
        words = query.split()
        if len(words) > 3:
            key_phrases = ' '.join(words[1:]) if words[0].lower() in question_words else ' '.join(words[:3])
            if key_phrases not in queries:
                queries.append(key_phrases)

        return queries[:3]

    def multi_query_retrieval(
        self,
        query: str,
        top_k: int = 5,
        score_threshold: float = 0.5,
        expanded_queries: Optional[List[str]] = None
    ) -> List[RetrievedDocument]:
        """
        Retrieve documents using multiple query variations
        Combines results from all query variations with deduplication
        """
        if expanded_queries is None:
            expanded_queries = [query]

        all_results = {}  # Deduplicate by doc_id

        for q in expanded_queries:
            # Generate embedding for each query variant
            query_embedding = self.embedding_service.encode_text(q)

            # Search in Qdrant
            results = self.qdrant_service.search(
                query_embedding=query_embedding,
                limit=top_k,
                score_threshold=score_threshold
            )

            # Add to results (keep highest score for duplicates)
            for result in results:
                doc_id = result["id"]
                if doc_id not in all_results or result["confidence"] > all_results[doc_id].confidence:
                    # Get the text from metadata - supports both "text" (string) and "texts" (array)
                    metadata = result["metadata"]
                    doc_text = metadata.get("text", "")
                    if not doc_text and "texts" in metadata:
                        # If it is an array, join it into a single string
                        texts_arr = metadata.get("texts", [])
                        if isinstance(texts_arr, list):
                            doc_text = "\n".join(texts_arr)
                        else:
                            doc_text = str(texts_arr)

                    all_results[doc_id] = RetrievedDocument(
                        id=doc_id,
                        text=doc_text,
                        confidence=result["confidence"],
                        metadata=metadata
                    )

        # Sort by confidence and return top_k
        sorted_results = sorted(all_results.values(), key=lambda x: x.confidence, reverse=True)
        return sorted_results[:top_k * 2]  # Return more for reranking

    def rerank_documents_cross_encoder(
        self,
        query: str,
        documents: List[RetrievedDocument],
        top_k: int = 5
    ) -> List[RetrievedDocument]:
        """
        Rerank documents using a Cross-Encoder (2025 best practice)
        Cross-Encoder provides superior relevance scoring compared to bi-encoders

        Args:
            query: Original user query
            documents: Retrieved documents to rerank
            top_k: Number of top documents to return

        Returns:
            Reranked documents
        """
        if not documents:
            return documents

        # Prepare query-document pairs for Cross-Encoder
        pairs = [[query, doc.text] for doc in documents]

        # Get Cross-Encoder scores (raw logits)
        ce_scores = self.cross_encoder.predict(pairs)
        ce_scores = [float(s) for s in ce_scores]

        # Min-max normalization to scale scores into the 0-1 range
        # (used instead of sigmoid, which yields very low scores for negative logits)
        min_score = min(ce_scores)
        max_score = max(ce_scores)

        if max_score - min_score > 0.001:  # Scores actually differ
            ce_scores_normalized = [
                (score - min_score) / (max_score - min_score)
                for score in ce_scores
            ]
        else:
            # All scores are nearly identical -> keep the original confidences
            ce_scores_normalized = [doc.confidence for doc in documents]

        # Combine: 70% Cross-Encoder ranking + 30% original cosine similarity
        # to retain part of the semantic similarity signal from the embeddings
        reranked = []
        for doc, ce_norm in zip(documents, ce_scores_normalized):
            combined_score = 0.7 * ce_norm + 0.3 * doc.confidence
            reranked.append(RetrievedDocument(
                id=doc.id,
                text=doc.text,
                confidence=float(combined_score),
                metadata=doc.metadata
            ))

        # Sort by combined score
        reranked.sort(key=lambda x: x.confidence, reverse=True)
        return reranked[:top_k]

    def compress_context(
        self,
        query: str,
        documents: List[RetrievedDocument],
        max_tokens: int = 500
    ) -> List[RetrievedDocument]:
        """
        Compress context - keep important content intact and only truncate text that is too long.
        Word-overlap filtering is intentionally NOT used because it can discard important information.
        (The query parameter is kept for interface compatibility but is not used here.)
        """
        compressed_docs = []

        for doc in documents:
            text = doc.text.strip()

            # Only truncate if the text is too long (estimate: ~4 chars per token)
            max_chars = max_tokens * 4
            if len(text) > max_chars:
                # Cut smartly at the nearest sentence boundary
                truncated = text[:max_chars]
                last_period = max(
                    truncated.rfind('.'),
                    truncated.rfind('!'),
                    truncated.rfind('?'),
                    truncated.rfind('\n')
                )
                if last_period > max_chars * 0.5:  # Only if sentence punctuation falls in the second half
                    truncated = truncated[:last_period + 1]
                text = truncated.strip()

            compressed_docs.append(RetrievedDocument(
                id=doc.id,
                text=text,
                confidence=doc.confidence,
                metadata=doc.metadata
            ))

        return compressed_docs

    def _split_sentences(self, text: str) -> List[str]:
        """Split text into sentences (Vietnamese-aware)"""
        sentences = re.split(r'[.!?]+', text)
        return [s.strip() for s in sentences if s.strip()]

    def hybrid_rag_pipeline(
        self,
        query: str,
        top_k: int = 5,
        score_threshold: float = 0.5,
        use_reranking: bool = True,
        use_compression: bool = True,
        use_query_expansion: bool = True,
        max_context_tokens: int = 500,
        hf_client=None
    ) -> Tuple[List[RetrievedDocument], Dict]:
        """
        Complete advanced RAG pipeline (2025 best practices)
        1. LLM-based query expansion
        2. Multi-query retrieval
        3. Cross-Encoder reranking
        4. Contextual compression
        
        Args:
            query: User query
            top_k: Number of documents to return
            score_threshold: Minimum relevance score
            use_reranking: Enable Cross-Encoder reranking
            use_compression: Enable context compression
            use_query_expansion: Enable LLM-based query expansion
            max_context_tokens: Max tokens for compression
            hf_client: HuggingFace InferenceClient for expansion
            
        Returns:
            (documents, stats)
        """
        stats = {
            "original_query": query,
            "expanded_queries": [],
            "initial_results": 0,
            "after_rerank": 0,
            "after_compression": 0,
            "used_cross_encoder": use_reranking,
            "used_llm_expansion": use_query_expansion and hf_client is not None
        }

        # Step 1: Query Expansion (LLM-based or rule-based)
        if use_query_expansion:
            expanded_queries = self.expand_query_llm(query, hf_client)
        else:
            expanded_queries = [query]
        
        stats["expanded_queries"] = expanded_queries

        # Step 2: Multi-query retrieval
        documents = self.multi_query_retrieval(
            query=query,
            top_k=top_k * 2,  # Get more candidates for reranking
            score_threshold=score_threshold,
            expanded_queries=expanded_queries
        )
        stats["initial_results"] = len(documents)

        # Step 3: Cross-Encoder reranking (2025 best practice)
        if use_reranking and documents:
            documents = self.rerank_documents_cross_encoder(
                query=query,
                documents=documents,
                top_k=top_k
            )
        else:
            documents = documents[:top_k]
        stats["after_rerank"] = len(documents)

        # Step 4: Contextual compression (optional)
        if use_compression and documents:
            documents = self.compress_context(
                query=query,
                documents=documents,
                max_tokens=max_context_tokens
            )
        stats["after_compression"] = len(documents)

        return documents, stats

    def format_context_for_llm(
        self,
        documents: List[RetrievedDocument],
        include_metadata: bool = True
    ) -> str:
        """
        Format retrieved documents into context string for LLM
        Uses better structure for improved LLM understanding
        """
        if not documents:
            return ""

        context_parts = ["RELEVANT CONTEXT:\n"]

        for i, doc in enumerate(documents, 1):
            context_parts.append(f"\n--- Document {i} (Relevance: {doc.confidence:.2%}) ---")
            context_parts.append(doc.text)

            if include_metadata and doc.metadata:
                # Add useful metadata
                meta_str = []
                for key, value in doc.metadata.items():
                    if key not in ['text', 'texts'] and value:
                        meta_str.append(f"{key}: {value}")
                if meta_str:
                    context_parts.append(f"[Metadata: {', '.join(meta_str)}]")

        context_parts.append("\n--- End of Context ---\n")
        return "\n".join(context_parts)

    def build_rag_prompt(
        self,
        query: str,
        context: str,
        system_message: str = "You are a helpful AI assistant."
    ) -> str:
        """
        Build optimized RAG system prompt for LLM
        The query itself is sent separately in the user message
        """
        prompt_template = f"""{system_message}

{context}

HƯỚNG DẪN TRẢ LỜI:
1. Đóng vai trò là một trợ lý ảo thân thiện, trả lời tự nhiên bằng tiếng Việt.
2. Dựa vào CONTEXT được cung cấp để trả lời câu hỏi.
3. KHÔNG copy nguyên văn text từ context. Hãy tổng hợp lại thông tin một cách mạch lạc.
4. Bắt đầu câu trả lời bằng các cụm từ tự nhiên như: "Dựa trên dữ liệu tôi tìm thấy...", "Tôi có thông tin về các sự kiện sau...", "Có vẻ như đây là những gì bạn đang tìm...".
5. Nếu có nhiều kết quả, hãy liệt kê ngắn gọn các điểm chính (Tên, Thời gian, Địa điểm).
6. Nếu context không liên quan, hãy lịch sự nói rằng bạn chưa tìm thấy thông tin phù hợp trong hệ thống."""

        return prompt_template
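

# -----------------------------------------------------------------------------
# Usage sketch (illustrative only).
# A minimal example of how this module might be wired together, assuming stub
# services that expose the interface AdvancedRAG relies on above:
#   embedding_service.encode_text(text) -> vector
#   qdrant_service.search(query_embedding, limit, score_threshold)
#       -> list of {"id", "confidence", "metadata"} dicts
# The stub classes below are hypothetical stand-ins, not part of the project.
if __name__ == "__main__":

    class _StubEmbeddingService:
        """Hypothetical embedding service: returns a fixed-size dummy vector."""
        def encode_text(self, text: str) -> List[float]:
            return [0.0] * 384

    class _StubQdrantService:
        """Hypothetical vector store: returns one canned hit in the assumed format."""
        def search(self, query_embedding, limit: int, score_threshold: float) -> List[Dict]:
            return [{
                "id": "doc-1",
                "confidence": 0.82,
                "metadata": {"text": "Sự kiện A diễn ra lúc 19h thứ Bảy tại Hà Nội."},
            }]

    rag = AdvancedRAG(_StubEmbeddingService(), _StubQdrantService())

    # hf_client=None -> query expansion falls back to the rule-based variant
    docs, stats = rag.hybrid_rag_pipeline(
        query="Sự kiện nào diễn ra vào cuối tuần này?",
        top_k=5,
        hf_client=None,
    )

    context = rag.format_context_for_llm(docs)
    system_prompt = rag.build_rag_prompt(query=stats["original_query"], context=context)
    print(system_prompt)
    print(stats)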